Column                   Type     Range of values
code                     string   lengths 81 – 54k
code_codestyle           int64    0 – 721
style_context            string   lengths 91 – 41.9k
style_context_codestyle  int64    0 – 699
label                    int64    0 – 1
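Each record below pairs a code sample with a style_context sample, their integer style-cluster ids, and a binary label. A minimal sketch of loading and inspecting such a dataset with the datasets library; the dataset path is a hypothetical placeholder, not the actual repository name:

from datasets import load_dataset

# NOTE: "user/code-style-pairs" is a hypothetical placeholder path.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:120])  # first characters of the raw source text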
import logging
from dataclasses import dataclass, field
from typing import Optional

from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments


logger = logging.getLogger(__name__)


@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """Extends TrainingArguments with sequence-to-sequence specific options."""

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
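A minimal sketch of parsing these options from the command line, assuming the dataclass restored above is named Seq2SeqTrainingArguments; HfArgumentParser turns the field metadata into argparse help strings:

from transformers import HfArgumentParser

parser = HfArgumentParser(Seq2SeqTrainingArguments)
(training_args,) = parser.parse_args_into_dataclasses(
    ["--output_dir", "out", "--label_smoothing", "0.1", "--predict_with_generate"]
)
print(training_args.label_smoothing, training_args.predict_with_generate)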
code_codestyle: 15
def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive recursive search over all ways to cut the rod."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic programming) rod cutting."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up dynamic programming) rod cutting."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
style_context_codestyle: 15
label: 1
import gc
import unittest

import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)


@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
code_codestyle: 15
from __future__ import annotations


class XORCipher:
    def __init__(self, key: int = 0):
        """Store an (optional) default key for later encrypt/decrypt calls."""
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """XOR every character of `content` with `key`; returns a list of chars."""
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        """Inverse of `encrypt` (XOR is symmetric); returns a list of chars."""
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        """XOR every character of `content` with `key`; returns a string."""
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        """Inverse of `encrypt_string` (XOR is symmetric)."""
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        """Encrypt `file` line by line into "encrypt.out"; True on success."""
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))

        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        """Decrypt `file` line by line into "decrypt.out"; True on success."""
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))

        except OSError:
            return False

        return True


# Tests
# crypt = XORCipher()
# key = 67

# # test encrypt
# print(crypt.encrypt("hallo welt", key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt", key), key))

# # test encrypt_string
# print(crypt.encrypt_string("hallo welt", key))

# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt", key), key))

# if (crypt.encrypt_file("test.txt", key)):
#     print("encrypt successful")
# else:
#     print("encrypt unsuccessful")

# if (crypt.decrypt_file("encrypt.out", key)):
#     print("decrypt successful")
# else:
#     print("decrypt unsuccessful")
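A quick round-trip check of the class restored above; because XOR with the same key is its own inverse, decrypt_string undoes encrypt_string:

cipher = XORCipher(key=67)
ciphertext = cipher.encrypt_string("hallo welt", 67)
assert cipher.decrypt_string(ciphertext, 67) == "hallo welt"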
style_context_codestyle: 15
label: 1
import gc
import unittest

import numpy as np
import torch

from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
code_codestyle: 15
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=self.path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
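This reader is the backend of the "text" loading script, which is normally reached through the public datasets API rather than instantiated directly. A minimal usage sketch; the file path is a placeholder:

from datasets import load_dataset

# "my_corpus.txt" is a placeholder path; each line becomes one example with a "text" field.
ds = load_dataset("text", data_files={"train": "my_corpus.txt"}, split="train")
print(ds[0]["text"])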
style_context_codestyle: 15
label: 1
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
code_codestyle: 15
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """
    Single-pass, in-place sort of a sequence containing only the values 0, 1 and 2.

    >>> dutch_national_flag_sort([2, 1, 0, 0, 1, 2])
    [0, 0, 1, 1, 2, 2]
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
style_context_codestyle: 15
label: 1
import argparse
import json
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SegformerConfig,
    SegformerForImageClassification,
    SegformerForSemanticSegmentation,
    SegformerImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value

    return new_state_dict


def read_in_k_v(config, state_dict):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(config, state_dict)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # set expected_slice based on model name
    # ADE20k checkpoints
    if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        )
    elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
                [[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
                [[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
            ]
        )
    elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
                [[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
                [[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
            ]
        )
    elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
                [[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
                [[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
            ]
        )
    elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
                [[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
                [[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
            ]
        )
    elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
                [[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
                [[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
            ]
        )
    # Cityscapes checkpoints
    elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
                [[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
                [[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
            ]
        )
    elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
                [[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
                [[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
            ]
        )
    elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
            [
                [
                    [-1.1372e01, -1.2787e01, -1.3477e01],
                    [-1.2536e01, -1.4194e01, -1.4409e01],
                    [-1.3217e01, -1.4888e01, -1.5327e01],
                ],
                [
                    [-1.4791e01, -1.7122e01, -1.8277e01],
                    [-1.7163e01, -1.9192e01, -1.9533e01],
                    [-1.7897e01, -1.9991e01, -2.0315e01],
                ],
                [
                    [7.6723e-01, 4.1921e-01, -7.7878e-02],
                    [4.7772e-01, 9.5557e-03, -2.8082e-01],
                    [3.6032e-01, -2.4826e-01, -5.1168e-01],
                ],
            ]
        )
    elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
            [
                [[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
                [[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
                [[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
            ]
        )
    elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        )
    elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
                [[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
                [[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
            ]
        )
    elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
                [[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
                [[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
            ]
        )
    elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
                [[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
                [[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
            ]
        )
    elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
                [[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
                [[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
            ]
        )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])

    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="segformer.b0.512x512.ade.160k",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
code_codestyle: 15
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if no already-placed queen attacks square (row, column)."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, backtracking on conflicts; collects every solution."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """Print the board with 'Q' for queens and '.' for empty squares."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
style_context_codestyle: 15
label: 1
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF

import datasets


_CITATION = """\
@inproceedings{popovic-2015-chrf,
    title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
    author = "Popovi{\'c}, Maja",
    booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
    month = sep,
    year = "2015",
    address = "Lisbon, Portugal",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W15-3049",
    doi = "10.18653/v1/W15-3049",
    pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
    title = "chr{F}++: words helping character n-grams",
    author = "Popovi{\'c}, Maja",
    booktitle = "Proceedings of the Second Conference on Machine Translation",
    month = sep,
    year = "2017",
    address = "Copenhagen, Denmark",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W17-4770",
    doi = "10.18653/v1/W17-4770",
    pages = "612--618",
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.

The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534

See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""

_KWARGS_DESCRIPTION = """
Produces ChrF(++) scores for hypotheses given reference translations.

Args:
    predictions (list of str): The predicted sentences.
    references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
    char_order (int): Character n-gram order. Defaults to `6`.
    word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
    beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
    lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
    whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
    eps_smoothing (bool): If `True`, applies epsilon smoothing similar
    to reference chrF++.py, NLTK and Moses implementations. If `False`,
    it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.

Returns:
    'score' (float): The chrF (chrF++) score,
    'char_order' (int): The character n-gram order,
    'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
    'beta' (int): Determine the importance of recall w.r.t precision

Examples:
    Example 1--a simple example of calculating chrF:
        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
        >>> chrf = datasets.load_metric("chrf")
        >>> results = chrf.compute(predictions=prediction, references=reference)
        >>> print(results)
        {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}

    Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
        >>> chrf = datasets.load_metric("chrf")
        >>> results = chrf.compute(predictions=prediction,
        ...                         references=reference,
        ...                         word_order=2)
        >>> print(results)
        {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}

    Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
        >>> chrf = datasets.load_metric("chrf")
        >>> results = chrf.compute(predictions=prediction,
        ...                         references=reference,
        ...                         word_order=2,
        ...                         lowercase=True)
        >>> print(results)
        {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
code_codestyle: 15
import argparse
import os

import torch

from transformers import (
    XLNetConfig,
    XLNetForQuestionAnswering,
    XLNetForSequenceClassification,
    XLNetLMHeadModel,
    load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}

logging.set_verbosity_info()


def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)

    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
15
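The conversion script above dispatches on the fine-tuning task name before loading TF weights. A compact sketch of just that dispatch, using a toy config so nothing is downloaded (all config values are illustrative):

# Task-to-head dispatch sketch (toy config; no checkpoint needed).
from transformers import (
    XLNetConfig,
    XLNetForQuestionAnswering,
    XLNetForSequenceClassification,
    XLNetLMHeadModel,
)

GLUE_TASKS_NUM_LABELS = {"cola": 2, "mnli": 3, "sts-b": 1}
config = XLNetConfig(d_model=64, n_layer=2, n_head=2, d_inner=128)

finetuning_task = "sts-b"  # regression task -> 1 label
if finetuning_task in GLUE_TASKS_NUM_LABELS:
    config.finetuning_task = finetuning_task
    config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
    model = XLNetForSequenceClassification(config)
elif "squad" in finetuning_task:
    model = XLNetForQuestionAnswering(config)
else:
    model = XLNetLMHeadModel(config)
print(type(model).__name__)  # XLNetForSequenceClassification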
1
ROMAN = [
    (1000, "M"), (900, "CM"), (500, "D"), (400, "CD"), (100, "C"), (90, "XC"),
    (50, "L"), (40, "XL"), (10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I"),
]


def roman_to_int(roman):
    '''simple docstring'''
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        # A smaller value before a larger one means subtractive notation (e.g. IV).
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number):
    '''simple docstring'''
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
15
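A quick round-trip check of the two converters above (using the function names as restored):

# Round-trip sanity check for the Roman numeral converters above.
assert roman_to_int("MMXXIV") == 2024
assert int_to_roman(2024) == "MMXXIV"
assert all(roman_to_int(int_to_roman(n)) == n for n in range(1, 4000))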
def A__ ( __A ):
    '''simple docstring'''
    bitmap = 0
    for ch in __A:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
15
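The bitmap check above in action. Each character maps to one bit of an arbitrarily large integer, so a repeated character hits an already-set bit:

# Uniqueness check via one bit per code point.
assert A__("abcdef") is True
assert A__("abcdea") is False
assert A__("") is True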
1
import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 lowerCAmelCase : Tuple =sys.version_info >= (3, 10) def A__ ( __A=None , __A=None ): '''simple docstring''' return field(default_factory=lambda: default , metadata=__A ) @dataclass class __snake_case : '''simple docstring''' _snake_case = 42 _snake_case = 42 _snake_case = 42 _snake_case = 42 @dataclass class __snake_case : '''simple docstring''' _snake_case = 42 _snake_case = field(default='toto' , metadata={'help': 'help message'} ) @dataclass class __snake_case : '''simple docstring''' _snake_case = False _snake_case = True _snake_case = None class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = 'titi' _snake_case = 'toto' class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = 'titi' _snake_case = 'toto' _snake_case = 42 @dataclass class __snake_case : '''simple docstring''' _snake_case = "toto" def _SCREAMING_SNAKE_CASE ( self : int) ->Any: """simple docstring""" _lowerCamelCase : Union[str, Any] = BasicEnum(self.foo) @dataclass class __snake_case : '''simple docstring''' _snake_case = "toto" def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]: """simple docstring""" _lowerCamelCase : Union[str, Any] = MixedTypeEnum(self.foo) @dataclass class __snake_case : '''simple docstring''' _snake_case = None _snake_case = field(default=__lowerCAmelCase , metadata={'help': 'help message'} ) _snake_case = None _snake_case = list_field(default=[] ) _snake_case = list_field(default=[] ) @dataclass class __snake_case : '''simple docstring''' _snake_case = list_field(default=[] ) _snake_case = list_field(default=[1, 2, 3] ) _snake_case = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) _snake_case = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class __snake_case : '''simple docstring''' _snake_case = field() _snake_case = field() _snake_case = field() def _SCREAMING_SNAKE_CASE ( self : Dict) ->Any: """simple docstring""" _lowerCamelCase : Dict = BasicEnum(self.required_enum) @dataclass class __snake_case : '''simple docstring''' _snake_case = 42 _snake_case = field() _snake_case = None _snake_case = field(default='toto' , metadata={'help': 'help message'} ) _snake_case = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) if is_python_no_less_than_3_10: @dataclass class __snake_case : '''simple docstring''' _snake_case = False _snake_case = True _snake_case = None @dataclass class __snake_case : '''simple docstring''' _snake_case = None _snake_case = field(default=__lowerCAmelCase , metadata={'help': 'help message'} ) _snake_case = None _snake_case = list_field(default=[] ) _snake_case = list_field(default=[] ) class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : argparse.ArgumentParser , _UpperCamelCase : argparse.ArgumentParser) ->Tuple: """simple docstring""" self.assertEqual(len(a._actions) , len(b._actions)) for x, y in zip(a._actions , b._actions): _lowerCamelCase : Optional[Any] = {k: v for k, v in vars(_UpperCamelCase).items() if k != 
"""container"""} _lowerCamelCase : Any = {k: v for k, v in vars(_UpperCamelCase).items() if k != """container"""} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("""choices""" , _UpperCamelCase) and yy.get("""choices""" , _UpperCamelCase): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["""type"""](_UpperCamelCase) , yy["""type"""](_UpperCamelCase)) del xx["type"], yy["type"] self.assertEqual(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Any: """simple docstring""" _lowerCamelCase : List[Any] = HfArgumentParser(_UpperCamelCase) _lowerCamelCase : Tuple = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=_UpperCamelCase , required=_UpperCamelCase) expected.add_argument("""--bar""" , type=_UpperCamelCase , required=_UpperCamelCase) expected.add_argument("""--baz""" , type=_UpperCamelCase , required=_UpperCamelCase) expected.add_argument("""--flag""" , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs="""?""") self.argparsersEqual(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Any = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""] ((_lowerCamelCase) , ) : Tuple = parser.parse_args_into_dataclasses(_UpperCamelCase , look_for_args_file=_UpperCamelCase) self.assertFalse(example.flag) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : Any = HfArgumentParser(_UpperCamelCase) _lowerCamelCase : List[Any] = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=42 , type=_UpperCamelCase) expected.add_argument("""--baz""" , default="""toto""" , type=_UpperCamelCase , help="""help message""") self.argparsersEqual(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]: """simple docstring""" _lowerCamelCase : Tuple = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs="""?""") expected.add_argument("""--baz""" , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs="""?""") # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("""--no_baz""" , action="""store_false""" , default=_UpperCamelCase , dest="""baz""") expected.add_argument("""--opt""" , type=_UpperCamelCase , default=_UpperCamelCase) _lowerCamelCase : List[str] = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(_UpperCamelCase) for dataclass_type in dataclass_types: _lowerCamelCase : Optional[Any] = HfArgumentParser(_UpperCamelCase) self.argparsersEqual(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : str = parser.parse_args([]) self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase)) _lowerCamelCase : Any = parser.parse_args(["""--foo""", """--no_baz"""]) self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase)) _lowerCamelCase : Union[str, Any] = parser.parse_args(["""--foo""", """--baz"""]) self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase)) _lowerCamelCase : int = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""]) self.assertEqual(_UpperCamelCase , 
Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase)) _lowerCamelCase : Optional[Any] = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""]) self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase)) def _SCREAMING_SNAKE_CASE ( self : str) ->Any: """simple docstring""" _lowerCamelCase : int = HfArgumentParser(_UpperCamelCase) _lowerCamelCase : Dict = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42]) , ) self.argparsersEqual(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Dict = parser.parse_args([]) self.assertEqual(args.foo , """toto""") _lowerCamelCase : Optional[int] = parser.parse_args_into_dataclasses([])[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto) _lowerCamelCase : Tuple = parser.parse_args(["""--foo""", """titi"""]) self.assertEqual(args.foo , """titi""") _lowerCamelCase : int = parser.parse_args_into_dataclasses(["""--foo""", """titi"""])[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi) _lowerCamelCase : List[str] = parser.parse_args(["""--foo""", """42"""]) self.assertEqual(args.foo , 42) _lowerCamelCase : Union[str, Any] = parser.parse_args_into_dataclasses(["""--foo""", """42"""])[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo) def _SCREAMING_SNAKE_CASE ( self : int) ->Union[str, Any]: """simple docstring""" @dataclass class __snake_case : '''simple docstring''' _snake_case = "toto" _lowerCamelCase : List[Any] = HfArgumentParser(_UpperCamelCase) _lowerCamelCase : Tuple = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42]) , ) self.argparsersEqual(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Tuple = parser.parse_args([]) self.assertEqual(args.foo , """toto""") _lowerCamelCase : Any = parser.parse_args(["""--foo""", """titi"""]) self.assertEqual(args.foo , """titi""") _lowerCamelCase : str = parser.parse_args(["""--foo""", """42"""]) self.assertEqual(args.foo , 42) def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple: """simple docstring""" _lowerCamelCase : Optional[Any] = HfArgumentParser(_UpperCamelCase) _lowerCamelCase : Any = argparse.ArgumentParser() expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=_UpperCamelCase) expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=_UpperCamelCase) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=_UpperCamelCase) expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=_UpperCamelCase) self.argparsersEqual(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : int = parser.parse_args([]) self.assertEqual( _UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3]) , ) _lowerCamelCase : Any = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split()) self.assertEqual(_UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7])) def _SCREAMING_SNAKE_CASE ( self : int) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Optional[Any] = argparse.ArgumentParser() expected.add_argument("""--foo""" , 
default=_UpperCamelCase , type=_UpperCamelCase) expected.add_argument("""--bar""" , default=_UpperCamelCase , type=_UpperCamelCase , help="""help message""") expected.add_argument("""--baz""" , default=_UpperCamelCase , type=_UpperCamelCase) expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=_UpperCamelCase) expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=_UpperCamelCase) _lowerCamelCase : str = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(_UpperCamelCase) for dataclass_type in dataclass_types: _lowerCamelCase : Tuple = HfArgumentParser(_UpperCamelCase) self.argparsersEqual(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : int = parser.parse_args([]) self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , bar=_UpperCamelCase , baz=_UpperCamelCase , ces=[] , des=[])) _lowerCamelCase : List[Any] = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split()) self.assertEqual(_UpperCamelCase , Namespace(foo=12 , bar=3.1_4 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3])) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[Any]: """simple docstring""" _lowerCamelCase : str = HfArgumentParser(_UpperCamelCase) _lowerCamelCase : str = argparse.ArgumentParser() expected.add_argument("""--required_list""" , nargs="""+""" , type=_UpperCamelCase , required=_UpperCamelCase) expected.add_argument("""--required_str""" , type=_UpperCamelCase , required=_UpperCamelCase) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""]) , choices=["""titi""", """toto"""] , required=_UpperCamelCase , ) self.argparsersEqual(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : str) ->Optional[int]: """simple docstring""" _lowerCamelCase : str = HfArgumentParser(_UpperCamelCase) _lowerCamelCase : Optional[Any] = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=_UpperCamelCase , required=_UpperCamelCase) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""]) , choices=["""titi""", """toto"""] , required=_UpperCamelCase , ) expected.add_argument("""--opt""" , type=_UpperCamelCase , default=_UpperCamelCase) expected.add_argument("""--baz""" , default="""toto""" , type=_UpperCamelCase , help="""help message""") expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=_UpperCamelCase) self.argparsersEqual(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : str) ->Optional[int]: """simple docstring""" _lowerCamelCase : List[str] = HfArgumentParser(_UpperCamelCase) _lowerCamelCase : List[Any] = { """foo""": 12, """bar""": 3.1_4, """baz""": """42""", """flag""": True, } _lowerCamelCase : int = parser.parse_dict(_UpperCamelCase)[0] _lowerCamelCase : List[Any] = BasicExample(**_UpperCamelCase) self.assertEqual(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]: """simple docstring""" _lowerCamelCase : Dict = HfArgumentParser(_UpperCamelCase) _lowerCamelCase : Tuple = { """foo""": 12, """bar""": 3.1_4, """baz""": """42""", """flag""": True, """extra""": 42, } self.assertRaises(_UpperCamelCase , parser.parse_dict , _UpperCamelCase , allow_extra_keys=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : str) ->Tuple: """simple docstring""" _lowerCamelCase : str = HfArgumentParser(_UpperCamelCase) _lowerCamelCase : List[str] = { """foo""": 12, """bar""": 3.1_4, 
"""baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: _lowerCamelCase : Union[str, Any] = os.path.join(_UpperCamelCase , """temp_json""") os.mkdir(_UpperCamelCase) with open(temp_local_path + """.json""" , """w+""") as f: json.dump(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = parser.parse_yaml_file(Path(temp_local_path + """.json"""))[0] _lowerCamelCase : Dict = BasicExample(**_UpperCamelCase) self.assertEqual(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : str) ->Tuple: """simple docstring""" _lowerCamelCase : Tuple = HfArgumentParser(_UpperCamelCase) _lowerCamelCase : Union[str, Any] = { """foo""": 12, """bar""": 3.1_4, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: _lowerCamelCase : int = os.path.join(_UpperCamelCase , """temp_yaml""") os.mkdir(_UpperCamelCase) with open(temp_local_path + """.yaml""" , """w+""") as f: yaml.dump(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Optional[int] = parser.parse_yaml_file(Path(temp_local_path + """.yaml"""))[0] _lowerCamelCase : Any = BasicExample(**_UpperCamelCase) self.assertEqual(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Any: """simple docstring""" _lowerCamelCase : Optional[int] = HfArgumentParser(_UpperCamelCase) self.assertIsNotNone(_UpperCamelCase)
15
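A minimal sketch of the dataclass-to-argparse pattern those tests exercise; the field names here are illustrative:

# HfArgumentParser mirrors a dataclass into command-line options.
from dataclasses import dataclass

from transformers import HfArgumentParser


@dataclass
class ToyArgs:
    foo: int
    bar: float = 3.14
    flag: bool = False


parser = HfArgumentParser(ToyArgs)
(toy_args,) = parser.parse_args_into_dataclasses(["--foo", "1", "--flag", "True"])
print(toy_args)  # ToyArgs(foo=1, bar=3.14, flag=True)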
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class __snake_case ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Tuple) ->int: """simple docstring""" _lowerCamelCase : Tuple = XLMRobertaModel.from_pretrained("""xlm-roberta-base""") _lowerCamelCase : Optional[int] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]]) # The dog is cute and lives in the garden house _lowerCamelCase : Optional[Any] = torch.Size((1, 12, 768)) # batch_size, sequence_length, embedding_vector_dim _lowerCamelCase : str = torch.tensor( [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]]) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): _lowerCamelCase : List[str] = model(_UpperCamelCase)["""last_hidden_state"""].detach() self.assertEqual(output.shape , _UpperCamelCase) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1E-3)) @slow def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : List[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""") _lowerCamelCase : Optional[Any] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]]) # The dog is cute and lives in the garden house _lowerCamelCase : str = torch.Size((1, 12, 1024)) # batch_size, sequence_length, embedding_vector_dim _lowerCamelCase : Union[str, Any] = torch.tensor( [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]]) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): _lowerCamelCase : int = model(_UpperCamelCase)["""last_hidden_state"""].detach() self.assertEqual(output.shape , _UpperCamelCase) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1E-3))
15
1
import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase : Optional[int] =16 lowerCAmelCase : int =32 def A__ ( __A , __A , __A , __A , __A = 16 ): '''simple docstring''' _lowerCamelCase : List[str] = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _lowerCamelCase : List[Any] = DatasetDict( { """train""": dataset["""train"""].select(__A ), """validation""": dataset["""train"""].select(__A ), """test""": dataset["""validation"""], } ) def tokenize_function(__A ): # max_length=None => use the model max length (it's actually the default) _lowerCamelCase : Optional[int] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__A , max_length=__A ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _lowerCamelCase : List[str] = datasets.map( __A , batched=__A , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _lowerCamelCase : List[Any] = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(__A ): # On TPU it's best to pad everything to the same length or training will be very slow. _lowerCamelCase : Any = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _lowerCamelCase : Union[str, Any] = 16 elif accelerator.mixed_precision != "no": _lowerCamelCase : List[Any] = 8 else: _lowerCamelCase : Optional[int] = None return tokenizer.pad( __A , padding="""longest""" , max_length=__A , pad_to_multiple_of=__A , return_tensors="""pt""" , ) # Instantiate dataloaders. 
_lowerCamelCase : Any = DataLoader( tokenized_datasets["""train"""] , shuffle=__A , collate_fn=__A , batch_size=__A ) _lowerCamelCase : Union[str, Any] = DataLoader( tokenized_datasets["""validation"""] , shuffle=__A , collate_fn=__A , batch_size=__A ) _lowerCamelCase : Union[str, Any] = DataLoader( tokenized_datasets["""test"""] , shuffle=__A , collate_fn=__A , batch_size=__A ) return train_dataloader, eval_dataloader, test_dataloader def A__ ( __A , __A ): '''simple docstring''' # New Code # _lowerCamelCase : Optional[int] = [] # Download the dataset _lowerCamelCase : int = load_dataset("""glue""" , """mrpc""" ) # Create our splits _lowerCamelCase : List[Any] = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator _lowerCamelCase : str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _lowerCamelCase : str = config["""lr"""] _lowerCamelCase : List[Any] = int(config["""num_epochs"""] ) _lowerCamelCase : List[str] = int(config["""seed"""] ) _lowerCamelCase : Tuple = int(config["""batch_size"""] ) _lowerCamelCase : Union[str, Any] = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation _lowerCamelCase : List[str] = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _lowerCamelCase : Dict = batch_size // MAX_GPU_BATCH_SIZE _lowerCamelCase : str = MAX_GPU_BATCH_SIZE set_seed(__A ) # New Code # # Create our folds: _lowerCamelCase : str = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] ) _lowerCamelCase : Dict = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(__A ): _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = get_fold_dataloaders( __A , __A , __A , __A , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _lowerCamelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__A ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _lowerCamelCase : List[str] = model.to(accelerator.device ) # Instantiate optimizer _lowerCamelCase : Any = AdamW(params=model.parameters() , lr=__A ) # Instantiate scheduler _lowerCamelCase : Optional[Any] = get_linear_schedule_with_warmup( optimizer=__A , num_warmup_steps=100 , num_training_steps=(len(__A ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any = accelerator.prepare( __A , __A , __A , __A , __A ) # Now we train the model for epoch in range(__A ): model.train() for step, batch in enumerate(__A ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) _lowerCamelCase : Optional[int] = model(**__A ) _lowerCamelCase : Tuple = outputs.loss _lowerCamelCase : int = loss / gradient_accumulation_steps accelerator.backward(__A ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__A ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _lowerCamelCase : List[str] = model(**__A ) _lowerCamelCase : Tuple = outputs.logits.argmax(dim=-1 ) _lowerCamelCase , _lowerCamelCase : int = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=__A , references=__A , ) _lowerCamelCase : Optional[Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , __A ) # New Code # # We also run predictions on the test set at the very end _lowerCamelCase : Optional[Any] = [] for step, batch in enumerate(__A ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _lowerCamelCase : Union[str, Any] = model(**__A ) _lowerCamelCase : Tuple = outputs.logits _lowerCamelCase , _lowerCamelCase : int = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(__A , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: _lowerCamelCase : Any = torch.cat(__A , dim=0 ) _lowerCamelCase : Dict = torch.stack(__A , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) _lowerCamelCase : List[Any] = metric.compute(predictions=__A , references=__A ) accelerator.print("""Average test metrics from all folds:""" , __A ) def A__ ( ): '''simple docstring''' _lowerCamelCase : Any = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=__A , default=__A , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) # New Code # parser.add_argument("""--num_folds""" , type=__A , default=3 , help="""The number of splits to perform across the dataset""" ) _lowerCamelCase : Tuple = parser.parse_args() _lowerCamelCase : Any = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(__A , __A ) if __name__ == "__main__": main()
15
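For reference, this is how the fold indices in the script above are produced: StratifiedKFold keeps the label ratio roughly constant in every split. A toy label vector makes that visible:

# Stratified splitting in isolation (toy labels for illustration).
import numpy as np
from sklearn.model_selection import StratifiedKFold

labels = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
kfold = StratifiedKFold(n_splits=5)
for fold, (train_idxs, valid_idxs) in enumerate(
    kfold.split(np.zeros(len(labels)), labels)
):
    print(fold, train_idxs, valid_idxs)  # each valid split holds one 0 and one 1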
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase : Tuple =logging.get_logger(__name__) class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = ['pixel_values'] def __init__( self : Optional[Any] , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Union[int, float] = 1 / 255 , _UpperCamelCase : bool = True , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , **_UpperCamelCase : str , ) ->None: """simple docstring""" super().__init__(**_UpperCamelCase) _lowerCamelCase : Tuple = size if size is not None else {"""height""": 256, """width""": 256} _lowerCamelCase : Optional[Any] = get_size_dict(_UpperCamelCase) _lowerCamelCase : Any = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} _lowerCamelCase : Any = get_size_dict(_UpperCamelCase , param_name="""crop_size""") _lowerCamelCase : int = do_resize _lowerCamelCase : int = size _lowerCamelCase : Optional[int] = resample _lowerCamelCase : int = do_center_crop _lowerCamelCase : Optional[Any] = crop_size _lowerCamelCase : Union[str, Any] = do_rescale _lowerCamelCase : List[str] = rescale_factor _lowerCamelCase : List[Any] = do_normalize _lowerCamelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray: """simple docstring""" _lowerCamelCase : Dict = get_size_dict(_UpperCamelCase) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""") return resize( _UpperCamelCase , size=(size["""height"""], size["""width"""]) , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : List[str] , ) ->np.ndarray: """simple docstring""" _lowerCamelCase : Union[str, Any] = get_size_dict(_UpperCamelCase) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. 
Got {size.keys()}""") return center_crop(_UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[int, float] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->str: """simple docstring""" return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray: """simple docstring""" return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : ImageInput , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Tuple=None , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : bool = None , _UpperCamelCase : float = None , _UpperCamelCase : bool = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , _UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCamelCase : List[Any] , ) ->PIL.Image.Image: """simple docstring""" _lowerCamelCase : Any = do_resize if do_resize is not None else self.do_resize _lowerCamelCase : List[str] = resample if resample is not None else self.resample _lowerCamelCase : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop _lowerCamelCase : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale _lowerCamelCase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCamelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize _lowerCamelCase : int = image_mean if image_mean is not None else self.image_mean _lowerCamelCase : Dict = image_std if image_std is not None else self.image_std _lowerCamelCase : Optional[Any] = size if size is not None else self.size _lowerCamelCase : Optional[int] = get_size_dict(_UpperCamelCase) _lowerCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size _lowerCamelCase : Dict = get_size_dict(_UpperCamelCase , param_name="""crop_size""") _lowerCamelCase : int = make_list_of_images(_UpperCamelCase) if not valid_images(_UpperCamelCase): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""") if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""") if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""") if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""") if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""") # All transformations expect numpy arrays. 
_lowerCamelCase : Union[str, Any] = [to_numpy_array(_UpperCamelCase) for image in images] if do_resize: _lowerCamelCase : Any = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase) for image in images] if do_center_crop: _lowerCamelCase : str = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase) for image in images] if do_rescale: _lowerCamelCase : Optional[int] = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase) for image in images] if do_normalize: _lowerCamelCase : List[str] = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase) for image in images] _lowerCamelCase : List[str] = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase) for image in images] _lowerCamelCase : int = {"""pixel_values""": images} return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase)
15
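A plain-numpy sketch of the rescale and normalize steps from the processor above; the mean/std shown are the IMAGENET_STANDARD values it defaults to:

# Rescale then normalize, as the image processor above does per image.
import numpy as np

image = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)
rescaled = image.astype(np.float32) * (1 / 255)
mean = np.array([0.5, 0.5, 0.5], dtype=np.float32)
std = np.array([0.5, 0.5, 0.5], dtype=np.float32)
normalized = (rescaled - mean) / std
print(normalized.min() >= -1.0 and normalized.max() <= 1.0)  # True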
1
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase : List[Any] =logging.get_logger(__name__) lowerCAmelCase : Optional[int] ="▁" lowerCAmelCase : Dict ={"vocab_file": "spiece.model"} lowerCAmelCase : Optional[int] ={ "vocab_file": { "google/reformer-crime-and-punishment": ( "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model" ) } } lowerCAmelCase : Union[str, Any] ={ "google/reformer-crime-and-punishment": 524288, } class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = VOCAB_FILES_NAMES _snake_case = PRETRAINED_VOCAB_FILES_MAP _snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case = ['input_ids', 'attention_mask'] def __init__( self : Any , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[str]="</s>" , _UpperCamelCase : int="<unk>" , _UpperCamelCase : List[Any]=[] , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : Any , ) ->None: """simple docstring""" _lowerCamelCase : Any = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , ) _lowerCamelCase : Union[str, Any] = vocab_file _lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(_UpperCamelCase) @property def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Any: """simple docstring""" return self.sp_model.get_piece_size() def _SCREAMING_SNAKE_CASE ( self : str) ->Dict[str, int]: """simple docstring""" _lowerCamelCase : List[str] = {self.convert_ids_to_tokens(_UpperCamelCase): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self : int) ->List[str]: """simple docstring""" _lowerCamelCase : Dict = self.__dict__.copy() _lowerCamelCase : Optional[int] = None return state def __setstate__( self : List[Any] , _UpperCamelCase : Dict) ->List[str]: """simple docstring""" _lowerCamelCase : List[Any] = d # for backward compatibility if not hasattr(self , """sp_model_kwargs"""): _lowerCamelCase : Dict = {} _lowerCamelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : str) ->List[str]: """simple docstring""" return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : Optional[int]) ->Optional[int]: """simple docstring""" return self.sp_model.piece_to_id(_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : Union[str, Any]) ->Union[str, Any]: """simple docstring""" if index < self.sp_model.get_piece_size(): _lowerCamelCase : Union[str, Any] = self.sp_model.IdToPiece(_UpperCamelCase) return token def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : Optional[int]) ->List[Any]: """simple docstring""" _lowerCamelCase : List[Any] = [] _lowerCamelCase : Optional[Any] = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(_UpperCamelCase) + token _lowerCamelCase : str = [] else: current_sub_tokens.append(_UpperCamelCase) out_string += self.sp_model.decode(_UpperCamelCase) return 
out_string.strip() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None) ->Tuple[str]: """simple docstring""" if not os.path.isdir(_UpperCamelCase): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""") return _lowerCamelCase : Optional[int] = os.path.join( _UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]) if os.path.abspath(self.vocab_file) != os.path.abspath(_UpperCamelCase) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , _UpperCamelCase) elif not os.path.isfile(self.vocab_file): with open(_UpperCamelCase , """wb""") as fi: _lowerCamelCase : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(_UpperCamelCase) return (out_vocab_file,)
15
from __future__ import annotations from math import pi from typing import Protocol import matplotlib.pyplot as plt import numpy as np class __snake_case ( __lowerCAmelCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : float) ->float: """simple docstring""" return 0.0 def A__ ( __A , __A ): '''simple docstring''' _lowerCamelCase : int = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] ) _lowerCamelCase : Tuple = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] ) return lowest, highest def A__ ( __A , __A ): '''simple docstring''' _lowerCamelCase : Tuple = 512 _lowerCamelCase : Tuple = [1] + [0] * (size - 1) _lowerCamelCase : Optional[Any] = [filter_type.process(__A ) for item in inputs] _lowerCamelCase : Optional[Any] = [0] * (samplerate - size) # zero-padding outputs += filler _lowerCamelCase : Tuple = np.abs(np.fft.fft(__A ) ) _lowerCamelCase : List[Any] = 20 * np.logaa(__A ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel("""Frequency (Hz)""" ) plt.xscale("""log""" ) # Display within reasonable bounds _lowerCamelCase : Any = get_bounds(__A , __A ) plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) ) plt.ylabel("""Gain (dB)""" ) plt.plot(__A ) plt.show() def A__ ( __A , __A ): '''simple docstring''' _lowerCamelCase : Tuple = 512 _lowerCamelCase : Union[str, Any] = [1] + [0] * (size - 1) _lowerCamelCase : int = [filter_type.process(__A ) for item in inputs] _lowerCamelCase : Optional[Any] = [0] * (samplerate - size) # zero-padding outputs += filler _lowerCamelCase : Any = np.angle(np.fft.fft(__A ) ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel("""Frequency (Hz)""" ) plt.xscale("""log""" ) plt.ylim(-2 * pi , 2 * pi ) plt.ylabel("""Phase shift (Radians)""" ) plt.plot(np.unwrap(__A , -2 * pi ) ) plt.show()
15
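The core of the frequency-response plot above, reduced to its essentials: push a unit impulse through a filter, zero-pad the impulse response to the sample rate, and read the gain off the FFT. An identity "filter" (process(x) == x) gives a flat 0 dB response, which makes the idea easy to verify:

# Impulse -> zero-pad -> FFT -> gain in dB, for an identity filter.
import numpy as np

samplerate, size = 48000, 512
outputs = [1.0] + [0.0] * (size - 1)  # impulse response of the identity filter
padded = outputs + [0.0] * (samplerate - size)
gain_db = 20 * np.log10(np.abs(np.fft.fft(padded)) + 1e-12)
print(np.allclose(gain_db, 0.0, atol=1e-6))  # True: flat 0 dB response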
1
from __future__ import annotations

import os
from typing import Any

import requests

BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token) -> dict[Any, Any]:
    '''simple docstring'''
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
15
import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL lowerCAmelCase : Tuple =version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11") def A__ ( __A , __A , __A , __A , __A , __A , __A , __A=False , ): '''simple docstring''' output_path.parent.mkdir(parents=__A , exist_ok=__A ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( __A , __A , f=output_path.as_posix() , input_names=__A , output_names=__A , dynamic_axes=__A , do_constant_folding=__A , use_external_data_format=__A , enable_onnx_checker=__A , opset_version=__A , ) else: export( __A , __A , f=output_path.as_posix() , input_names=__A , output_names=__A , dynamic_axes=__A , do_constant_folding=__A , opset_version=__A , ) @torch.no_grad() def A__ ( __A , __A , __A , __A = False ): '''simple docstring''' _lowerCamelCase : Tuple = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): _lowerCamelCase : str = """cuda""" elif fpaa and not torch.cuda.is_available(): raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" ) else: _lowerCamelCase : List[str] = """cpu""" _lowerCamelCase : Dict = Path(__A ) # VAE DECODER _lowerCamelCase : Optional[Any] = AutoencoderKL.from_pretrained(model_path + """/vae""" ) _lowerCamelCase : List[str] = vae_decoder.config.latent_channels # forward only through the decoder part _lowerCamelCase : Tuple = vae_decoder.decode onnx_export( __A , model_args=( torch.randn(1 , __A , 25 , 25 ).to(device=__A , dtype=__A ), False, ) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={ """latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""}, } , opset=__A , ) del vae_decoder if __name__ == "__main__": lowerCAmelCase : Optional[int] =argparse.ArgumentParser() parser.add_argument( "--model_path", type=str, required=True, help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", ) parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") parser.add_argument( "--opset", default=14, type=int, help="The version of the ONNX operator set to use.", ) parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") lowerCAmelCase : Optional[Any] =parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print("SD: Done: ONNX")
15
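A hypothetical smoke test for the exported decoder. The output path and latent shape follow the export call above; it assumes `onnxruntime` is installed and that SD-style VAEs use 4 latent channels (both assumptions, not guarantees):

# Load the exported ONNX decoder and run a random latent through it.
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("sd-onnx/vae_decoder/model.onnx")  # placeholder path
latent_sample = np.random.randn(1, 4, 25, 25).astype(np.float32)
(sample,) = session.run(["sample"], {"latent_sample": latent_sample})
print(sample.shape)  # (1, 3, 200, 200) if the decoder upsamples 8x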
1
def A__ ( __A , __A ): '''simple docstring''' _lowerCamelCase : int = word.split() def justify(__A , __A , __A ) -> str: _lowerCamelCase : List[str] = max_width - width _lowerCamelCase : Any = len(__A ) if len(__A ) == 1: # if there is only word in line # just insert overall_spaces_count for the remainder of line return line[0] + " " * overall_spaces_count else: _lowerCamelCase : Optional[Any] = words_count - 1 # num_spaces_between_words_list[i] : tells you to insert # num_spaces_between_words_list[i] spaces # after word on line[i] _lowerCamelCase : str = spaces_to_insert_between_words * [ overall_spaces_count // spaces_to_insert_between_words ] _lowerCamelCase : Union[str, Any] = ( overall_spaces_count % spaces_to_insert_between_words ) # distribute spaces via round robin to the left words for i in range(__A ): num_spaces_between_words_list[i] += 1 _lowerCamelCase : Any = [] for i in range(__A ): # add the word aligned_words_list.append(line[i] ) # add the spaces to insert aligned_words_list.append(num_spaces_between_words_list[i] * """ """ ) # just add the last word to the sentence aligned_words_list.append(line[-1] ) # join the aligned words list to form a justified line return "".join(__A ) _lowerCamelCase : Optional[Any] = [] _lowerCamelCase : list[str] = [] _lowerCamelCase : Any = 0 for word in words: if width + len(__A ) + len(__A ) <= max_width: # keep adding words until we can fill out max_width # width = sum of length of all words (without overall_spaces_count) # len(word) = length of current word # len(line) = number of overall_spaces_count to insert between words line.append(__A ) width += len(__A ) else: # justify the line and add it to result answer.append(justify(__A , __A , __A ) ) # reset new line and new width _lowerCamelCase , _lowerCamelCase : List[str] = [word], len(__A ) _lowerCamelCase : List[Any] = max_width - width - len(__A ) answer.append(""" """.join(__A ) + (remaining_spaces + 1) * """ """ ) return answer if __name__ == "__main__": from doctest import testmod testmod()
15
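The round-robin space distribution used by the `justify` helper above, shown in isolation: leftover spaces go to the leftmost gaps first.

# Distribute padding spaces across word gaps, extras to the left.
line, max_width = ["This", "is", "an"], 16
width = sum(len(word) for word in line)  # 8
gaps = len(line) - 1                     # 2
spaces, extra = divmod(max_width - width, gaps)
justified = ""
for i, word in enumerate(line[:-1]):
    justified += word + " " * (spaces + (1 if i < extra else 0))
justified += line[-1]
print(repr(justified))  # 'This    is    an' (exactly 16 characters)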
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # TEMPERATURE (unit = K)


def A__ ( donor_conc , acceptor_conc , intrinsic_conc , ):
    '''simple docstring'''
    if donor_conc <= 0:
        raise ValueError("""Donor concentration should be positive""")
    elif acceptor_conc <= 0:
        raise ValueError("""Acceptor concentration should be positive""")
    elif intrinsic_conc <= 0:
        raise ValueError("""Intrinsic concentration should be positive""")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            """Donor concentration should be greater than intrinsic concentration""")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            """Acceptor concentration should be greater than intrinsic concentration""")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
15
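A worked example for the built-in voltage formula above, V_bi = (kT/q) * ln(N_d * N_a / n_i^2), with illustrative silicon-like concentrations (in cm^-3; the units cancel inside the logarithm):

# Numeric check of the built-in voltage formula.
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300
donor_conc, acceptor_conc, intrinsic_conc = 1e17, 1e17, 1e10
v_bi = (
    Boltzmann * T * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
    / physical_constants["electron volt"][0]
)
print(f"{v_bi:.3f} V")  # ~0.833 V with kT/q ~ 25.85 mV at 300 K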
1
import os import unittest from transformers import LxmertTokenizer, LxmertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __snake_case ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _snake_case = LxmertTokenizer _snake_case = LxmertTokenizerFast _snake_case = True _snake_case = True def _SCREAMING_SNAKE_CASE ( self : Any) ->List[str]: """simple docstring""" super().setUp() _lowerCamelCase : Any = [ """[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] _lowerCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens])) def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : List[str]) ->int: """simple docstring""" _lowerCamelCase : Any = """UNwant\u00E9d,running""" _lowerCamelCase : Optional[int] = """unwanted, running""" return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[int]: """simple docstring""" _lowerCamelCase : List[str] = self.tokenizer_class(self.vocab_file) _lowerCamelCase : Tuple = tokenizer.tokenize("""UNwant\u00E9d,running""") self.assertListEqual(_UpperCamelCase , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""]) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase) , [7, 4, 5, 10, 8, 9]) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int: """simple docstring""" if not self.test_rust_tokenizer: return _lowerCamelCase : Tuple = self.get_tokenizer() _lowerCamelCase : int = self.get_rust_tokenizer() _lowerCamelCase : List[Any] = """I was born in 92000, and this is falsé.""" _lowerCamelCase : Tuple = tokenizer.tokenize(_UpperCamelCase) _lowerCamelCase : int = rust_tokenizer.tokenize(_UpperCamelCase) self.assertListEqual(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : str = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase) _lowerCamelCase : Optional[int] = rust_tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase) self.assertListEqual(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[Any] = self.get_rust_tokenizer() _lowerCamelCase : Optional[Any] = tokenizer.encode(_UpperCamelCase) _lowerCamelCase : Any = rust_tokenizer.encode(_UpperCamelCase) self.assertListEqual(_UpperCamelCase , _UpperCamelCase)
15
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def A__ ( __A ): '''simple docstring''' _lowerCamelCase : Tuple = {} _lowerCamelCase : List[Any] = tokenizer(example["""content"""] , truncation=__A )["""input_ids"""] _lowerCamelCase : Tuple = len(example["""content"""] ) / len(output["""input_ids"""] ) return output lowerCAmelCase : int =HfArgumentParser(PretokenizationArguments) lowerCAmelCase : int =parser.parse_args() if args.num_workers is None: lowerCAmelCase : Any =multiprocessing.cpu_count() lowerCAmelCase : Optional[Any] =AutoTokenizer.from_pretrained(args.tokenizer_dir) lowerCAmelCase : str =time.time() lowerCAmelCase : Union[str, Any] =load_dataset(args.dataset_name, split="train") print(F"""Dataset loaded in {time.time()-t_start:.2f}s""") lowerCAmelCase : Dict =time.time() lowerCAmelCase : Dict =ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ "repo_name", "path", "copies", "size", "content", "license", "hash", "line_mean", "line_max", "alpha_frac", "autogenerated", ], ) print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""") lowerCAmelCase : Tuple =time.time() ds.push_to_hub(args.tokenized_data_repo) print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
15
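The chars-per-token ratio tracked by the script above, computed for one toy snippet; gpt2 is just a small stand-in tokenizer here:

# One-off version of the ratio_char_token statistic.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
content = "def add(a, b):\n    return a + b\n"
input_ids = tokenizer(content, truncation=True)["input_ids"]
print(len(content) / len(input_ids))  # characters per token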
1
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase : Tuple =logging.get_logger(__name__) lowerCAmelCase : int ="▁" lowerCAmelCase : Tuple ={"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"} lowerCAmelCase : str ={ "vocab_file": { "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model", }, "monolingual_vocab_file": { "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt", }, } lowerCAmelCase : Any ={"vinai/bartpho-syllable": 1024} class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = VOCAB_FILES_NAMES _snake_case = PRETRAINED_VOCAB_FILES_MAP _snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case = ['input_ids', 'attention_mask'] def __init__( self : str , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any="<s>" , _UpperCamelCase : List[str]="</s>" , _UpperCamelCase : Any="</s>" , _UpperCamelCase : Tuple="<s>" , _UpperCamelCase : List[str]="<unk>" , _UpperCamelCase : Tuple="<pad>" , _UpperCamelCase : Any="<mask>" , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : Optional[int] , ) ->None: """simple docstring""" _lowerCamelCase : List[Any] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else mask_token _lowerCamelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , cls_token=_UpperCamelCase , pad_token=_UpperCamelCase , mask_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , ) _lowerCamelCase : Union[str, Any] = vocab_file _lowerCamelCase : Optional[int] = monolingual_vocab_file _lowerCamelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(_UpperCamelCase)) # Load the reduced vocab # Keep order of special tokens for backward compatibility _lowerCamelCase : str = {} _lowerCamelCase : str = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(_UpperCamelCase) not in self.fairseq_tokens_to_ids: _lowerCamelCase : Union[str, Any] = cnt cnt += 1 with open(_UpperCamelCase , """r""" , encoding="""utf-8""") as f: for line in f.readlines(): _lowerCamelCase : Union[str, Any] = line.strip().split()[0] _lowerCamelCase : Union[str, Any] = len(self.fairseq_tokens_to_ids) if str(_UpperCamelCase) not in self.fairseq_tokens_to_ids: _lowerCamelCase : List[str] = len(self.fairseq_tokens_to_ids) _lowerCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Union[str, Any]) ->Tuple: """simple docstring""" _lowerCamelCase : int = self.__dict__.copy() _lowerCamelCase : Any = None _lowerCamelCase : str = self.sp_model.serialized_model_proto() return state def __setstate__( self : Union[str, Any] , _UpperCamelCase : Dict) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = d # for backward compatibility if not hasattr(self , """sp_model_kwargs"""): _lowerCamelCase : Optional[Any] = {} _lowerCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) 
self.sp_model.LoadFromSerializedProto(self.sp_model_proto) def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None) ->List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _lowerCamelCase : Dict = [self.cls_token_id] _lowerCamelCase : List[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False) ->List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase) if token_ids_a is None: return [1] + ([0] * len(_UpperCamelCase)) + [1] return [1] + ([0] * len(_UpperCamelCase)) + [1, 1] + ([0] * len(_UpperCamelCase)) + [1] def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None) ->List[int]: """simple docstring""" _lowerCamelCase : Optional[Any] = [self.sep_token_id] _lowerCamelCase : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] @property def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Tuple: """simple docstring""" return len(self.fairseq_ids_to_tokens) def _SCREAMING_SNAKE_CASE ( self : str) ->int: """simple docstring""" _lowerCamelCase : Optional[Any] = {self.convert_ids_to_tokens(_UpperCamelCase): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : str) ->List[str]: """simple docstring""" return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : Tuple) ->List[Any]: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Union[str, Any]) ->List[str]: """simple docstring""" return self.fairseq_ids_to_tokens[index] def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : List[Any]) ->Dict: """simple docstring""" _lowerCamelCase : Dict = """""".join(_UpperCamelCase).replace(_UpperCamelCase , """ """).strip() return out_string def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None) ->Tuple[str]: """simple docstring""" if not os.path.isdir(_UpperCamelCase): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""") return _lowerCamelCase : Dict = os.path.join( _UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]) _lowerCamelCase : str = os.path.join( _UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , ) if os.path.abspath(self.vocab_file) != os.path.abspath(_UpperCamelCase) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , _UpperCamelCase) elif not os.path.isfile(self.vocab_file): with open(_UpperCamelCase , """wb""") as fi: _lowerCamelCase : int = self.sp_model.serialized_model_proto() fi.write(_UpperCamelCase) if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath( _UpperCamelCase) and 
os.path.isfile(self.monolingual_vocab_file): copyfile(self.monolingual_vocab_file , _UpperCamelCase) elif not os.path.isfile(self.monolingual_vocab_file): with open(_UpperCamelCase , """w""" , encoding="""utf-8""") as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(F"""{str(_UpperCamelCase)} \n""") return out_vocab_file, out_monolingual_vocab_file
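# Usage sketch for the BartPho syllable tokenizer above; assumes the public
# vinai/bartpho-syllable checkpoint is reachable, and the Vietnamese sentence
# is an arbitrary illustration, not taken from this file.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    bartpho = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
    enc = bartpho("Chúng tôi là những nghiên cứu viên.", return_tensors="pt")
    print(enc["input_ids"])  # ids drawn from the reduced monolingual vocabulary
    print(bartpho.convert_ids_to_tokens(enc["input_ids"][0].tolist()))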
15
import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _snake_case = IFPipeline _snake_case = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'} _snake_case = TEXT_TO_IMAGE_BATCH_PARAMS _snake_case = PipelineTesterMixin.required_optional_params - {'latents'} def _SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]: """simple docstring""" return self._get_dummy_components() def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=0) ->Optional[Any]: """simple docstring""" if str(_UpperCamelCase).startswith("""mps"""): _lowerCamelCase : int = torch.manual_seed(_UpperCamelCase) else: _lowerCamelCase : List[Any] = torch.Generator(device=_UpperCamelCase).manual_seed(_UpperCamelCase) _lowerCamelCase : Dict = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""") def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1) def _SCREAMING_SNAKE_CASE ( self : int) ->Any: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2) def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]: """simple docstring""" self._test_save_load_local() def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3) @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]: """simple docstring""" _lowerCamelCase : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa) _lowerCamelCase : Tuple = IFSuperResolutionPipeline.from_pretrained( """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("""cuda""") _lowerCamelCase , _lowerCamelCase : str = 
pipe_a.encode_prompt("""anime turtle""" , device="""cuda""") del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() _lowerCamelCase : str = None _lowerCamelCase : str = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img _lowerCamelCase : Optional[Any] = IFImgaImgPipeline(**pipe_a.components) _lowerCamelCase : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if_imgaimg(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting _lowerCamelCase : Any = IFInpaintingPipeline(**pipe_a.components) _lowerCamelCase : Dict = IFInpaintingSuperResolutionPipeline(**pipe_a.components) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if_inpainting(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str) ->Tuple: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : Optional[int] = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Optional[Any] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Optional[int] = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 _lowerCamelCase : Dict = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : str = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : Any = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : str , _UpperCamelCase : List[Any]) ->Any: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Union[str, Any] = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Dict 
= pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Union[str, Any] = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _lowerCamelCase : List[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : List[str] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Optional[Any] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : List[Any] = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : str = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple) ->Optional[int]: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(1)).to(_UpperCamelCase) _lowerCamelCase : int = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Any = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Any = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _lowerCamelCase : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1)).to(_UpperCamelCase) _lowerCamelCase : List[str] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : Optional[Any] = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : Optional[Any] = 
torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def A__ ( ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
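# Minimal text-to-image sketch of the DeepFloyd IF stage-I pipeline the slow
# tests above exercise; assumes a CUDA GPU and access to the gated
# DeepFloyd/IF-I-XL-v1.0 checkpoint (illustrative only, reusing the prompt
# and step count from the tests).
if __name__ == "__main__":
    pipe = IFPipeline.from_pretrained(
        "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16
    )
    pipe.enable_model_cpu_offload()
    image = pipe("anime turtle", num_inference_steps=2, output_type="np").images[0]
    print(image.shape)  # stage I produces 64x64 RGB arrays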
15
1
# Project Euler 92: count how many starting numbers below ten million produce
# a square-digit chain that arrives at 89.
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in str(i)) for i in range(100_000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of ``number``."""
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared


# There are 2 chains made:
# one ends with 89, and the chain member 58, when declared first, needs the
# least number of iterations for all the members to be checked;
# the other one ends with 1 and has only one element, 1.
# So 58 and 1 are seeded at the start.
# Changed dictionary to an array to quicken the solution.
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[57] = True  # the chain of 58 arrives at 89
CHAINS[0] = False  # the chain of 1 stays at 1


def chain(number: int) -> bool:
    """Return True if the chain starting at ``number`` arrives at 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore[return-value]

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    # Multiples of ten share the same chain result, so cache them too.
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Count the starting numbers below ``number`` whose chain arrives at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(True)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
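# Worked example (illustrative): tracing one square-digit chain by hand shows
# why every start ends at either 1 or 89.
if __name__ == "__main__":
    n = 44
    seen = [n]
    while n not in (1, 89):
        n = next_number(n)
        seen.append(n)
    print(seen)  # [44, 32, 13, 10, 1] -> this chain ends at 1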
15
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
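# Usage sketch (illustrative): the derived attributes computed in __init__.
if __name__ == "__main__":
    from transformers import SwinConfig as HubSwinConfig

    demo_config = HubSwinConfig(image_size=192, window_size=6)
    print(demo_config.hidden_size)  # 96 * 2**3 == 768, channels after the last stage
    print(demo_config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']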
15
1
import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = ['image_processor', 'tokenizer'] _snake_case = 'AutoImageProcessor' _snake_case = 'AutoTokenizer' def __init__( self : Any , _UpperCamelCase : List[str]=None , _UpperCamelCase : int=None , **_UpperCamelCase : str) ->Optional[int]: """simple docstring""" _lowerCamelCase : Tuple = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , _UpperCamelCase , ) _lowerCamelCase : Optional[int] = kwargs.pop("""feature_extractor""") _lowerCamelCase : Dict = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""") if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""") super().__init__(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Dict = self.image_processor _lowerCamelCase : int = False def __call__( self : str , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : Optional[int]) ->List[Any]: """simple docstring""" if self._in_target_context_manager: return self.current_processor(*_UpperCamelCase , **_UpperCamelCase) _lowerCamelCase : Optional[int] = kwargs.pop("""images""" , _UpperCamelCase) _lowerCamelCase : Any = kwargs.pop("""text""" , _UpperCamelCase) if len(_UpperCamelCase) > 0: _lowerCamelCase : Tuple = args[0] _lowerCamelCase : List[Any] = args[1:] if images is None and text is None: raise ValueError("""You need to specify either an `images` or `text` input to process.""") if images is not None: _lowerCamelCase : Union[str, Any] = self.image_processor(_UpperCamelCase , *_UpperCamelCase , **_UpperCamelCase) if text is not None: _lowerCamelCase : Optional[int] = self.tokenizer(_UpperCamelCase , **_UpperCamelCase) if text is None: return inputs elif images is None: return encodings else: _lowerCamelCase : int = encodings["""input_ids"""] return inputs def _SCREAMING_SNAKE_CASE ( self : int , *_UpperCamelCase : List[str] , **_UpperCamelCase : Optional[Any]) ->Optional[int]: """simple docstring""" return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , *_UpperCamelCase : Tuple , **_UpperCamelCase : int) ->Union[str, Any]: """simple docstring""" return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase) @contextmanager def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[Any]: """simple docstring""" warnings.warn( """`as_target_processor` is deprecated and will be removed in v5 of Transformers. 
You can process your """ """labels by using the argument `text` of the regular `__call__` method (either in the same call as """ """your images inputs, or in a separate call.""") _lowerCamelCase : Tuple = True _lowerCamelCase : Optional[Any] = self.tokenizer yield _lowerCamelCase : List[Any] = self.image_processor _lowerCamelCase : int = False def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int=False , _UpperCamelCase : Optional[int]=None) ->Tuple: """simple docstring""" if added_vocab is None: _lowerCamelCase : int = self.tokenizer.get_added_vocab() _lowerCamelCase : List[str] = {} while tokens: _lowerCamelCase : Union[str, Any] = re.search(R"""<s_(.*?)>""" , _UpperCamelCase , re.IGNORECASE) if start_token is None: break _lowerCamelCase : Union[str, Any] = start_token.group(1) _lowerCamelCase : Tuple = re.search(RF"""</s_{key}>""" , _UpperCamelCase , re.IGNORECASE) _lowerCamelCase : Tuple = start_token.group() if end_token is None: _lowerCamelCase : int = tokens.replace(_UpperCamelCase , """""") else: _lowerCamelCase : int = end_token.group() _lowerCamelCase : str = re.escape(_UpperCamelCase) _lowerCamelCase : int = re.escape(_UpperCamelCase) _lowerCamelCase : Optional[int] = re.search(F"""{start_token_escaped}(.*?){end_token_escaped}""" , _UpperCamelCase , re.IGNORECASE) if content is not None: _lowerCamelCase : Tuple = content.group(1).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node _lowerCamelCase : Optional[Any] = self.tokenajson(_UpperCamelCase , is_inner_value=_UpperCamelCase , added_vocab=_UpperCamelCase) if value: if len(_UpperCamelCase) == 1: _lowerCamelCase : Tuple = value[0] _lowerCamelCase : Tuple = value else: # leaf nodes _lowerCamelCase : Optional[Any] = [] for leaf in content.split(R"""<sep/>"""): _lowerCamelCase : Tuple = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": _lowerCamelCase : Optional[Any] = leaf[1:-2] # for categorical special tokens output[key].append(_UpperCamelCase) if len(output[key]) == 1: _lowerCamelCase : List[str] = output[key][0] _lowerCamelCase : Optional[int] = tokens[tokens.find(_UpperCamelCase) + len(_UpperCamelCase) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:] , is_inner_value=_UpperCamelCase , added_vocab=_UpperCamelCase) if len(_UpperCamelCase): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _UpperCamelCase , ) return self.image_processor_class @property def _SCREAMING_SNAKE_CASE ( self : str) ->Dict: """simple docstring""" warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , _UpperCamelCase , ) return self.image_processor
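# Sketch of the tag-to-JSON decoding implemented by the method above (it
# mirrors Donut-style token2json parsing). The sequence and key names below
# are invented for illustration and are not part of this file:
#
#     "<s_menu><s_name>Latte</s_name><s_price>4.50</s_price></s_menu>"
#
# decodes to {"menu": {"name": "Latte", "price": "4.50"}}: each
# <s_key>...</s_key> span becomes a key, nesting recursively, and leaf values
# are split on <sep/>.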
15
import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = (EulerDiscreteScheduler,) _snake_case = 10 def _SCREAMING_SNAKE_CASE ( self : Tuple , **_UpperCamelCase : Optional[Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Optional[int] = { """num_train_timesteps""": 1100, """beta_start""": 0.0_0_0_1, """beta_end""": 0.0_2, """beta_schedule""": """linear""", } config.update(**_UpperCamelCase) return config def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict: """simple docstring""" for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]): self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : List[Any] = self.scheduler_classes[0] _lowerCamelCase : str = self.get_scheduler_config() _lowerCamelCase : Any = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps) _lowerCamelCase : str = torch.manual_seed(0) _lowerCamelCase : str = self.dummy_model() _lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma _lowerCamelCase : int = sample.to(_UpperCamelCase) for i, t in enumerate(scheduler.timesteps): _lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : str = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : Dict = output.prev_sample _lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : Any = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2 assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any: """simple docstring""" _lowerCamelCase : int = self.scheduler_classes[0] _lowerCamelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""") _lowerCamelCase : int = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps) _lowerCamelCase : Any = torch.manual_seed(0) _lowerCamelCase : int = self.dummy_model() _lowerCamelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma _lowerCamelCase : Dict = sample.to(_UpperCamelCase) for i, t in enumerate(scheduler.timesteps): _lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : str = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[Any] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : Tuple = output.prev_sample _lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase 
: Optional[int] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 0.0_0_0_2) < 1E-2 assert abs(result_mean.item() - 2.2_6_7_6E-0_6) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.scheduler_classes[0] _lowerCamelCase : int = self.get_scheduler_config() _lowerCamelCase : List[Any] = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase) _lowerCamelCase : Optional[Any] = torch.manual_seed(0) _lowerCamelCase : Tuple = self.dummy_model() _lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _lowerCamelCase : Tuple = sample.to(_UpperCamelCase) for t in scheduler.timesteps: _lowerCamelCase : List[Any] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Any = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : List[Any] = output.prev_sample _lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : List[Any] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2 assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple: """simple docstring""" _lowerCamelCase : List[str] = self.scheduler_classes[0] _lowerCamelCase : Optional[int] = self.get_scheduler_config() _lowerCamelCase : int = scheduler_class(**_UpperCamelCase , use_karras_sigmas=_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase) _lowerCamelCase : int = torch.manual_seed(0) _lowerCamelCase : Tuple = self.dummy_model() _lowerCamelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _lowerCamelCase : Optional[int] = sample.to(_UpperCamelCase) for t in scheduler.timesteps: _lowerCamelCase : Tuple = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Any = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : int = output.prev_sample _lowerCamelCase : Tuple = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : List[str] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9) < 1E-2 assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3) < 1E-3
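# Standalone sketch of the denoising loop these tests exercise, using the same
# scheduler config as get_scheduler_config and a zero tensor standing in for a
# real UNet (illustrative only).
if __name__ == "__main__":
    sched = EulerDiscreteScheduler(
        num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
    )
    sched.set_timesteps(10)
    gen = torch.manual_seed(0)
    sample = torch.randn(1, 3, 8, 8) * sched.init_noise_sigma
    for t in sched.timesteps:
        scaled = sched.scale_model_input(sample, t)
        model_output = torch.zeros_like(scaled)  # stand-in for model(scaled, t)
        sample = sched.step(model_output, t, sample, generator=gen).prev_sample
    print(sample.abs().mean())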
15
1
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
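# Illustrative invocation; the script name and both paths below are
# placeholders, not values from this file:
#
#   python convert_gpt2_tf_checkpoint.py \
#       --gpt2_checkpoint_path /path/to/gpt2/model.ckpt \
#       --pytorch_dump_folder_path /path/to/output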
15
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer used by MGP-STR for scene-text recognition."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # MGP-STR tokenizes at the character level.
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
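# Usage sketch; assumes the alibaba-damo/mgp-str-base checkpoint is reachable
# (illustrative only).
if __name__ == "__main__":
    tok = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
    print(tok("hello")["input_ids"])  # one id per character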
15
1
import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = (EulerDiscreteScheduler,) _snake_case = 10 def _SCREAMING_SNAKE_CASE ( self : Tuple , **_UpperCamelCase : Optional[Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Optional[int] = { """num_train_timesteps""": 1100, """beta_start""": 0.0_0_0_1, """beta_end""": 0.0_2, """beta_schedule""": """linear""", } config.update(**_UpperCamelCase) return config def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict: """simple docstring""" for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]): self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : List[Any] = self.scheduler_classes[0] _lowerCamelCase : str = self.get_scheduler_config() _lowerCamelCase : Any = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps) _lowerCamelCase : str = torch.manual_seed(0) _lowerCamelCase : str = self.dummy_model() _lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma _lowerCamelCase : int = sample.to(_UpperCamelCase) for i, t in enumerate(scheduler.timesteps): _lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : str = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : Dict = output.prev_sample _lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : Any = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2 assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any: """simple docstring""" _lowerCamelCase : int = self.scheduler_classes[0] _lowerCamelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""") _lowerCamelCase : int = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps) _lowerCamelCase : Any = torch.manual_seed(0) _lowerCamelCase : int = self.dummy_model() _lowerCamelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma _lowerCamelCase : Dict = sample.to(_UpperCamelCase) for i, t in enumerate(scheduler.timesteps): _lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : str = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[Any] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : Tuple = output.prev_sample _lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase 
: Optional[int] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 0.0_0_0_2) < 1E-2 assert abs(result_mean.item() - 2.2_6_7_6E-0_6) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.scheduler_classes[0] _lowerCamelCase : int = self.get_scheduler_config() _lowerCamelCase : List[Any] = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase) _lowerCamelCase : Optional[Any] = torch.manual_seed(0) _lowerCamelCase : Tuple = self.dummy_model() _lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _lowerCamelCase : Tuple = sample.to(_UpperCamelCase) for t in scheduler.timesteps: _lowerCamelCase : List[Any] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Any = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : List[Any] = output.prev_sample _lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : List[Any] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2 assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple: """simple docstring""" _lowerCamelCase : List[str] = self.scheduler_classes[0] _lowerCamelCase : Optional[int] = self.get_scheduler_config() _lowerCamelCase : int = scheduler_class(**_UpperCamelCase , use_karras_sigmas=_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase) _lowerCamelCase : int = torch.manual_seed(0) _lowerCamelCase : Tuple = self.dummy_model() _lowerCamelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _lowerCamelCase : Optional[int] = sample.to(_UpperCamelCase) for t in scheduler.timesteps: _lowerCamelCase : Tuple = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Any = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : int = output.prev_sample _lowerCamelCase : Tuple = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : List[str] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9) < 1E-2 assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3) < 1E-3
15
import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
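# Quick interactive check of the alignment helper the first test covers
# (illustrative only).
if __name__ == "__main__":
    demo_stages = ["stem", "stage1", "stage2"]
    # Both out_features and out_indices None -> defaults to the last stage.
    print(get_aligned_output_features_output_indices(None, None, demo_stages))
    # -> (['stage2'], [2])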
15
1
import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html lowerCAmelCase : int ="platform" import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def A__ ( __A , __A , __A=None , __A=None , __A=None , __A=None , __A=None , __A=None , ): '''simple docstring''' if attention_mask is None: _lowerCamelCase : List[str] = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: _lowerCamelCase : Optional[int] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: _lowerCamelCase : str = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _lowerCamelCase : Optional[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _lowerCamelCase : Any = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class __snake_case : '''simple docstring''' def __init__( self : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : Dict=13 , _UpperCamelCase : int=7 , _UpperCamelCase : str=True , _UpperCamelCase : List[str]=False , _UpperCamelCase : int=99 , _UpperCamelCase : Union[str, Any]=16 , _UpperCamelCase : Any=2 , _UpperCamelCase : List[Any]=4 , _UpperCamelCase : List[Any]=4 , _UpperCamelCase : Dict="gelu" , _UpperCamelCase : Union[str, Any]=0.1 , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : Any=32 , _UpperCamelCase : List[Any]=2 , _UpperCamelCase : str=1 , _UpperCamelCase : Any=0 , _UpperCamelCase : int=0.0_2 , ) ->List[str]: """simple docstring""" _lowerCamelCase : int = parent _lowerCamelCase : Optional[Any] = batch_size _lowerCamelCase : Union[str, Any] = seq_length _lowerCamelCase : Union[str, Any] = is_training _lowerCamelCase : List[str] = use_labels _lowerCamelCase : Tuple = vocab_size _lowerCamelCase : List[str] = hidden_size _lowerCamelCase : List[Any] = num_hidden_layers _lowerCamelCase : Union[str, Any] = num_attention_heads _lowerCamelCase : Tuple = intermediate_size _lowerCamelCase : Any = hidden_act _lowerCamelCase : List[Any] = hidden_dropout_prob _lowerCamelCase : List[Any] = attention_probs_dropout_prob _lowerCamelCase : Any = max_position_embeddings _lowerCamelCase : List[Any] = eos_token_id _lowerCamelCase : List[Any] = pad_token_id _lowerCamelCase : List[Any] = bos_token_id _lowerCamelCase : Tuple = initializer_range def _SCREAMING_SNAKE_CASE ( self : str) ->Optional[int]: """simple docstring""" _lowerCamelCase : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size) _lowerCamelCase : Optional[int] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , 
dtype=np.intaa)) , -1) _lowerCamelCase : List[Any] = shift_tokens_right(_UpperCamelCase , 1 , 2) _lowerCamelCase : Optional[int] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_UpperCamelCase , ) _lowerCamelCase : Union[str, Any] = prepare_blenderbot_inputs_dict(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) return config, inputs_dict def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]: """simple docstring""" _lowerCamelCase , _lowerCamelCase : List[Any] = self.prepare_config_and_inputs() return config, inputs_dict def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int]) ->Any: """simple docstring""" _lowerCamelCase : Optional[int] = 20 _lowerCamelCase : Tuple = model_class_name(_UpperCamelCase) _lowerCamelCase : Any = model.encode(inputs_dict["""input_ids"""]) _lowerCamelCase , _lowerCamelCase : int = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _lowerCamelCase : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , _UpperCamelCase , _UpperCamelCase) _lowerCamelCase : str = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""") _lowerCamelCase : Any = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _lowerCamelCase : Optional[Any] = model.decode( decoder_input_ids[:, :-1] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , ) _lowerCamelCase : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""") _lowerCamelCase : int = model.decode( decoder_input_ids[:, -1:] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_UpperCamelCase , ) _lowerCamelCase : Dict = model.decode(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""") def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : List[Any]) ->Any: """simple docstring""" _lowerCamelCase : Optional[Any] = 20 _lowerCamelCase : Tuple = model_class_name(_UpperCamelCase) _lowerCamelCase : Any = model.encode(inputs_dict["""input_ids"""]) _lowerCamelCase , _lowerCamelCase : List[Any] = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _lowerCamelCase : List[Any] = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ] , axis=-1 , ) _lowerCamelCase : List[Any] = model.init_cache(decoder_input_ids.shape[0] , _UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Dict = jnp.broadcast_to( 
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _lowerCamelCase : Dict = model.decode( decoder_input_ids[:, :-1] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , ) _lowerCamelCase : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""") _lowerCamelCase : int = model.decode( decoder_input_ids[:, -1:] , _UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , ) _lowerCamelCase : Tuple = model.decode(_UpperCamelCase , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase) _lowerCamelCase : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""") @require_flax class __snake_case ( unittest.TestCase ): '''simple docstring''' _snake_case = 99 def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]: """simple docstring""" _lowerCamelCase : int = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) _lowerCamelCase : List[Any] = input_ids.shape[0] _lowerCamelCase : Tuple = BlenderbotConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[int]: """simple docstring""" _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = self._get_config_and_data() _lowerCamelCase : Any = FlaxBlenderbotForConditionalGeneration(_UpperCamelCase) _lowerCamelCase : List[Any] = lm_model(input_ids=_UpperCamelCase) _lowerCamelCase : Union[str, Any] = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["""logits"""].shape , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : Tuple = BlenderbotConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) _lowerCamelCase : Optional[int] = FlaxBlenderbotForConditionalGeneration(_UpperCamelCase) _lowerCamelCase : int = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa) _lowerCamelCase : Tuple = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa) _lowerCamelCase : Dict = lm_model(input_ids=_UpperCamelCase , decoder_input_ids=_UpperCamelCase) _lowerCamelCase : Any = (*summary.shape, config.vocab_size) self.assertEqual(outputs["""logits"""].shape , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[int]: """simple docstring""" _lowerCamelCase : List[Any] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa) _lowerCamelCase : int = shift_tokens_right(_UpperCamelCase , 1 , 2) 
_lowerCamelCase : Tuple = np.equal(_UpperCamelCase , 1).astype(np.floataa).sum() _lowerCamelCase : Dict = np.equal(_UpperCamelCase , 1).astype(np.floataa).sum() self.assertEqual(shifted.shape , input_ids.shape) self.assertEqual(_UpperCamelCase , n_pad_before - 1) self.assertTrue(np.equal(shifted[:, 0] , 2).all()) @require_flax class __snake_case ( __lowerCAmelCase , unittest.TestCase , __lowerCAmelCase ): '''simple docstring''' _snake_case = True _snake_case = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) _snake_case = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def _SCREAMING_SNAKE_CASE ( self : Any) ->Any: """simple docstring""" _lowerCamelCase : Any = FlaxBlenderbotModelTester(self) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[int]: """simple docstring""" _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Tuple: """simple docstring""" _lowerCamelCase , _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]: """simple docstring""" _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): _lowerCamelCase : Tuple = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = model_class(_UpperCamelCase) @jax.jit def encode_jitted(_UpperCamelCase : str , _UpperCamelCase : Any=None , **_UpperCamelCase : Dict): return model.encode(input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase) with self.subTest("""JIT Enabled"""): _lowerCamelCase : Dict = encode_jitted(**_UpperCamelCase).to_tuple() with self.subTest("""JIT Disabled"""): with jax.disable_jit(): _lowerCamelCase : str = encode_jitted(**_UpperCamelCase).to_tuple() self.assertEqual(len(_UpperCamelCase) , len(_UpperCamelCase)) for jitted_output, output in zip(_UpperCamelCase , _UpperCamelCase): self.assertEqual(jitted_output.shape , output.shape) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: """simple docstring""" _lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): _lowerCamelCase : Any = model_class(_UpperCamelCase) _lowerCamelCase : int = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""]) _lowerCamelCase : Union[str, Any] = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(_UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any]): return model.decode( decoder_input_ids=_UpperCamelCase , decoder_attention_mask=_UpperCamelCase , encoder_outputs=_UpperCamelCase , ) with self.subTest("""JIT Enabled"""): _lowerCamelCase : Dict = decode_jitted(**_UpperCamelCase).to_tuple() with self.subTest("""JIT Disabled"""): with jax.disable_jit(): _lowerCamelCase : 
Optional[int] = decode_jitted(**_UpperCamelCase).to_tuple() self.assertEqual(len(_UpperCamelCase) , len(_UpperCamelCase)) for jitted_output, output in zip(_UpperCamelCase , _UpperCamelCase): self.assertEqual(jitted_output.shape , output.shape) @slow def _SCREAMING_SNAKE_CASE ( self : str) ->Any: """simple docstring""" for model_class_name in self.all_model_classes: _lowerCamelCase : Any = model_class_name.from_pretrained("""facebook/blenderbot-400M-distill""") # FlaxBlenderbotForSequenceClassification expects eos token in input_ids _lowerCamelCase : Dict = np.ones((1, 1)) * model.config.eos_token_id _lowerCamelCase : int = model(_UpperCamelCase) self.assertIsNotNone(_UpperCamelCase) @unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""") @slow def _SCREAMING_SNAKE_CASE ( self : int) ->str: """simple docstring""" _lowerCamelCase : Any = {"""num_beams""": 1, """early_stopping""": True, """min_length""": 15, """max_length""": 25} _lowerCamelCase : Dict = {"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True} _lowerCamelCase : Dict = FlaxBlenderbotForConditionalGeneration.from_pretrained("""facebook/blenderbot-3B""" , from_pt=_UpperCamelCase) _lowerCamelCase : Union[str, Any] = BlenderbotTokenizer.from_pretrained("""facebook/blenderbot-3B""") _lowerCamelCase : List[Any] = ["""Sam"""] _lowerCamelCase : Dict = tokenizer(_UpperCamelCase , return_tensors="""jax""") _lowerCamelCase : Optional[int] = model.generate(**_UpperCamelCase , **_UpperCamelCase) _lowerCamelCase : Dict = """Sam is a great name. It means \"sun\" in Gaelic.""" _lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(_UpperCamelCase , **_UpperCamelCase) assert generated_txt[0].strip() == tgt_text
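# Illustrative check of the shift_tokens_right semantics used throughout these
# tests; assumes flax is installed so the conditional import block above ran.
if __name__ == "__main__":
    demo_ids = np.array([[71, 82, 18, 2]], dtype=np.int64)
    # Prepends the decoder start token (2) and drops the last position.
    print(shift_tokens_right(demo_ids, 1, 2))  # expected [[ 2 71 82 18]]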
15
import math


def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Returns the next prime strictly above value * factor (or below, with desc=True)."""
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
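A short usage sketch for the two helpers above; the is_prime/next_prime names are reconstructions of the obfuscated identifiers, and the expected values follow directly from the definitions.

# Sketch only: assumes the reconstructed names above.
assert is_prime(13) and not is_prime(15)
assert next_prime(14) == 17           # climbs from a composite start
assert next_prime(5, factor=2) == 11  # 5 * 2 = 10, next prime is 11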
15
1
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
15
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING lowerCAmelCase : Optional[Any] =logging.get_logger(__name__) @add_end_docstrings(__lowerCAmelCase ) class __snake_case ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : str , *_UpperCamelCase : int , **_UpperCamelCase : List[str]) ->Tuple: """simple docstring""" super().__init__(*_UpperCamelCase , **_UpperCamelCase) requires_backends(self , """vision""") self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING) def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : List[str]=None) ->Optional[int]: """simple docstring""" _lowerCamelCase : Optional[int] = {} if top_k is not None: _lowerCamelCase : str = top_k return {}, {}, postprocess_params def __call__( self : Optional[int] , _UpperCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_UpperCamelCase : Optional[int]) ->Dict: """simple docstring""" return super().__call__(_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[int]) ->str: """simple docstring""" _lowerCamelCase : Tuple = load_image(_UpperCamelCase) _lowerCamelCase : Any = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework) return model_inputs def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : Union[str, Any]) ->List[str]: """simple docstring""" _lowerCamelCase : Any = self.model(**_UpperCamelCase) return model_outputs def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : List[str]=5) ->str: """simple docstring""" if top_k > self.model.config.num_labels: _lowerCamelCase : Union[str, Any] = self.model.config.num_labels if self.framework == "pt": _lowerCamelCase : Optional[Any] = model_outputs.logits.softmax(-1)[0] _lowerCamelCase , _lowerCamelCase : Dict = probs.topk(_UpperCamelCase) elif self.framework == "tf": _lowerCamelCase : List[Any] = stable_softmax(model_outputs.logits , axis=-1)[0] _lowerCamelCase : List[Any] = tf.math.top_k(_UpperCamelCase , k=_UpperCamelCase) _lowerCamelCase , _lowerCamelCase : str = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(F"""Unsupported framework: {self.framework}""") _lowerCamelCase : str = scores.tolist() _lowerCamelCase : str = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase)]
15
1
import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
15
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class __snake_case ( unittest.TestCase ): '''simple docstring''' _snake_case = ViTImageProcessor if is_vision_available() else None @property def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = (3, 32, 128) _lowerCamelCase : str = tempfile.mkdtemp() # fmt: off _lowerCamelCase : Dict = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on _lowerCamelCase : str = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase)))) _lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp: fp.write(json.dumps(_UpperCamelCase) + """\n""") _lowerCamelCase : Any = { """do_normalize""": False, """do_resize""": True, """image_processor_type""": """ViTImageProcessor""", """resample""": 3, """size""": {"""height""": 32, """width""": 128}, } _lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , _UpperCamelCase) with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp: json.dump(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **_UpperCamelCase : Any) ->Tuple: """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Dict , **_UpperCamelCase : Optional[Any]) ->List[Any]: """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: """simple docstring""" shutil.rmtree(self.tmpdirname) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: """simple docstring""" _lowerCamelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta) _lowerCamelCase : int = Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1)) return image_input def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" _lowerCamelCase : List[str] = self.get_tokenizer() _lowerCamelCase : Tuple = self.get_image_processor() _lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) processor.save_pretrained(self.tmpdirname) _lowerCamelCase : int = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCamelCase) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab()) self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string()) 
self.assertIsInstance(processor.image_processor , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict: """simple docstring""" _lowerCamelCase : Dict = self.get_tokenizer() _lowerCamelCase : Optional[Any] = self.get_image_processor() _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) processor.save_pretrained(self.tmpdirname) _lowerCamelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""") _lowerCamelCase : Union[str, Any] = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0) _lowerCamelCase : Tuple = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCamelCase , padding_value=1.0) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any) ->int: """simple docstring""" _lowerCamelCase : int = self.get_image_processor() _lowerCamelCase : int = self.get_tokenizer() _lowerCamelCase : List[str] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : List[str] = self.prepare_image_inputs() _lowerCamelCase : Optional[int] = image_processor(_UpperCamelCase , return_tensors="""np""") _lowerCamelCase : int = processor(images=_UpperCamelCase , return_tensors="""np""") for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]: """simple docstring""" _lowerCamelCase : List[Any] = self.get_image_processor() _lowerCamelCase : int = self.get_tokenizer() _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : Optional[int] = """test""" _lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase) _lowerCamelCase : Dict = tokenizer(_UpperCamelCase) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.get_image_processor() _lowerCamelCase : List[Any] = self.get_tokenizer() _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : Any = """test""" _lowerCamelCase : List[str] = self.prepare_image_inputs() _lowerCamelCase : int = processor(text=_UpperCamelCase , images=_UpperCamelCase) self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""]) # test if it raises when no input is passed with pytest.raises(_UpperCamelCase): processor() def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.get_image_processor() _lowerCamelCase : List[str] = self.get_tokenizer() _lowerCamelCase : Dict = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] _lowerCamelCase : Any = processor.char_decode(_UpperCamelCase) _lowerCamelCase : Tuple = tokenizer.batch_decode(_UpperCamelCase) _lowerCamelCase : List[str] = [seq.replace(""" """ , """""") for seq in decoded_tok] 
self.assertListEqual(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str: """simple docstring""" _lowerCamelCase : Dict = self.get_image_processor() _lowerCamelCase : str = self.get_tokenizer() _lowerCamelCase : List[Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : int = None _lowerCamelCase : Union[str, Any] = self.prepare_image_inputs() _lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase , images=_UpperCamelCase) self.assertListEqual(list(inputs.keys()) , processor.model_input_names) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : List[str] = self.get_image_processor() _lowerCamelCase : int = self.get_tokenizer() _lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : Any = torch.randn(1 , 27 , 38) _lowerCamelCase : List[Any] = torch.randn(1 , 27 , 5_0257) _lowerCamelCase : List[str] = torch.randn(1 , 27 , 3_0522) _lowerCamelCase : int = processor.batch_decode([char_input, bpe_input, wp_input]) self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
15
1
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sorts a sequence containing only 0s, 1s and 2s in a single pass."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
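A non-interactive usage sketch for the sort above (the function name is the reconstruction used in the restored code):

# Sketch only: deterministic example of the three-way partition.
example = [2, 0, 2, 1, 1, 0]
print(dutch_national_flag_sort(example))  # [0, 0, 1, 1, 2, 2]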
15
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """Helper function parsing the command line options."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
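A hypothetical invocation of this launcher; the script filename and the trailing training flags are illustrative, not taken from the corpus.

# python xla_spawn.py --num_cores 8 path/to/train_script.py --learning_rate 3e-5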
15
1
import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def A__ ( __A , __A , __A , __A , __A ): '''simple docstring''' # load base model _lowerCamelCase : List[Any] = StableDiffusionPipeline.from_pretrained(__A , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors _lowerCamelCase : str = load_file(__A ) _lowerCamelCase : int = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: _lowerCamelCase : Union[str, Any] = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" ) _lowerCamelCase : Any = pipeline.text_encoder else: _lowerCamelCase : Union[str, Any] = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" ) _lowerCamelCase : Any = pipeline.unet # find the target layer _lowerCamelCase : Any = layer_infos.pop(0 ) while len(__A ) > -1: try: _lowerCamelCase : List[Any] = curr_layer.__getattr__(__A ) if len(__A ) > 0: _lowerCamelCase : List[Any] = layer_infos.pop(0 ) elif len(__A ) == 0: break except Exception: if len(__A ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: _lowerCamelCase : Union[str, Any] = layer_infos.pop(0 ) _lowerCamelCase : Optional[Any] = [] if "lora_down" in key: pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) ) pair_keys.append(__A ) else: pair_keys.append(__A ) pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: _lowerCamelCase : int = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) _lowerCamelCase : Optional[Any] = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(__A , __A ).unsqueeze(2 ).unsqueeze(3 ) else: _lowerCamelCase : Tuple = state_dict[pair_keys[0]].to(torch.floataa ) _lowerCamelCase : Optional[Any] = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(__A , __A ) # update visited list for item in pair_keys: visited.append(__A ) return pipeline if __name__ == "__main__": lowerCAmelCase : Any =argparse.ArgumentParser() parser.add_argument( "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format." ) parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument( "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors" ) parser.add_argument( "--lora_prefix_text_encoder", default="lora_te", type=str, help="The prefix of text encoder weight in safetensors", ) parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW") parser.add_argument( "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not." ) parser.add_argument("--device", type=str, help="Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)") lowerCAmelCase : List[Any] =parser.parse_args() lowerCAmelCase : Tuple =args.base_model_path lowerCAmelCase : Dict =args.checkpoint_path lowerCAmelCase : Tuple =args.dump_path lowerCAmelCase : Dict =args.lora_prefix_unet lowerCAmelCase : List[Any] =args.lora_prefix_text_encoder lowerCAmelCase : Optional[Any] =args.alpha lowerCAmelCase : Optional[int] =convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) lowerCAmelCase : Optional[int] =pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
15
def A__ ( __A , __A ): '''simple docstring''' _enforce_args(__A , __A ) if n == 0: return 0 _lowerCamelCase : Tuple = float("""-inf""" ) for i in range(1 , n + 1 ): _lowerCamelCase : Any = max( __A , prices[i - 1] + naive_cut_rod_recursive(n - i , __A ) ) return max_revue def A__ ( __A , __A ): '''simple docstring''' _enforce_args(__A , __A ) _lowerCamelCase : Optional[Any] = [float("""-inf""" ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(__A , __A , __A ) def A__ ( __A , __A , __A ): '''simple docstring''' if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: _lowerCamelCase : int = float("""-inf""" ) for i in range(1 , n + 1 ): _lowerCamelCase : Optional[Any] = max( __A , prices[i - 1] + _top_down_cut_rod_recursive(n - i , __A , __A ) , ) _lowerCamelCase : Optional[Any] = max_revenue return max_rev[n] def A__ ( __A , __A ): '''simple docstring''' _enforce_args(__A , __A ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. _lowerCamelCase : List[Any] = [float("""-inf""" ) for _ in range(n + 1 )] _lowerCamelCase : Any = 0 for i in range(1 , n + 1 ): _lowerCamelCase : Any = max_rev[i] for j in range(1 , i + 1 ): _lowerCamelCase : List[Any] = max(__A , prices[j - 1] + max_rev[i - j] ) _lowerCamelCase : int = max_revenue_i return max_rev[n] def A__ ( __A , __A ): '''simple docstring''' if n < 0: _lowerCamelCase : Any = F"""n must be greater than or equal to 0. Got n = {n}""" raise ValueError(__A ) if n > len(__A ): _lowerCamelCase : List[Any] = ( """Each integral piece of rod must have a corresponding price. """ F"""Got n = {n} but length of prices = {len(__A )}""" ) raise ValueError(__A ) def A__ ( ): '''simple docstring''' _lowerCamelCase : str = [6, 10, 12, 15, 20, 23] _lowerCamelCase : List[str] = len(__A ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. _lowerCamelCase : Tuple = 36 _lowerCamelCase : Any = top_down_cut_rod(__A , __A ) _lowerCamelCase : Dict = bottom_up_cut_rod(__A , __A ) _lowerCamelCase : List[str] = naive_cut_rod_recursive(__A , __A ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
15
1
from math import factorial, radians


def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximates sin(angle_in_degrees) with a Maclaurin series."""
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__("doctest").testmod()
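A sanity-check sketch comparing the series against the standard library; it assumes the restored sin name above and uses a loose tolerance since the result is rounded to ten decimals.

# Sketch only: compare the Maclaurin approximation with math.sin.
import math

for deg in (0.0, 30.0, 90.0, 180.0):
    assert abs(sin(deg) - math.sin(math.radians(deg))) < 1e-6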
15
from __future__ import annotations class __snake_case : '''simple docstring''' def __init__( self : Tuple , _UpperCamelCase : int = 0) ->str: """simple docstring""" _lowerCamelCase : Union[str, Any] = key def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]: """simple docstring""" assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Union[str, Any] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(_UpperCamelCase) ^ key) for ch in content] def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]: """simple docstring""" assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Optional[int] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(_UpperCamelCase) ^ key) for ch in content] def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str: """simple docstring""" assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : int = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned _lowerCamelCase : Any = """""" for ch in content: ans += chr(ord(_UpperCamelCase) ^ key) return ans def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str: """simple docstring""" assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : int = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned _lowerCamelCase : Optional[Any] = """""" for ch in content: ans += chr(ord(_UpperCamelCase) ^ key) return ans def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->bool: """simple docstring""" assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase) try: with open(_UpperCamelCase) as fin, open("""encrypt.out""" , """w+""") as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(_UpperCamelCase , _UpperCamelCase)) except OSError: return False return True def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : int) ->bool: """simple docstring""" assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase) try: with open(_UpperCamelCase) as fin, open("""decrypt.out""" , """w+""") as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(_UpperCamelCase , _UpperCamelCase)) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
15
1
import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed lowerCAmelCase : List[Any] ={ "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), "bert": (BertConfig, BertForMaskedLM, BertTokenizer), "gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def A__ ( __A ): '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def A__ ( __A , __A ): '''simple docstring''' if args.student_type == "roberta": _lowerCamelCase : Tuple = False elif args.student_type == "gpt2": _lowerCamelCase : Union[str, Any] = False def A__ ( __A , __A ): '''simple docstring''' if args.student_type == "roberta": _lowerCamelCase : Dict = False def A__ ( ): '''simple docstring''' _lowerCamelCase : Any = argparse.ArgumentParser(description="""Training""" ) parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" ) parser.add_argument( """--dump_path""" , type=__A , required=__A , help="""The output directory (log, checkpoints, parameters, etc.)""" ) parser.add_argument( """--data_file""" , type=__A , required=__A , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , ) parser.add_argument( """--student_type""" , type=__A , choices=["""distilbert""", """roberta""", """gpt2"""] , required=__A , help="""The student type (DistilBERT, RoBERTa).""" , ) parser.add_argument("""--student_config""" , type=__A , required=__A , help="""Path to the student configuration.""" ) parser.add_argument( """--student_pretrained_weights""" , default=__A , type=__A , help="""Load student initialization checkpoint.""" ) parser.add_argument( """--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=__A , help="""Teacher type (BERT, RoBERTa).""" ) parser.add_argument("""--teacher_name""" , type=__A , required=__A , help="""The teacher model.""" ) parser.add_argument("""--temperature""" , default=2.0 , type=__A , help="""Temperature for the softmax temperature.""" ) parser.add_argument( """--alpha_ce""" , default=0.5 , type=__A , help="""Linear weight for the distillation loss. 
Must be >=0.""" ) parser.add_argument( """--alpha_mlm""" , default=0.0 , type=__A , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , ) parser.add_argument("""--alpha_clm""" , default=0.5 , type=__A , help="""Linear weight for the CLM loss. Must be >=0.""" ) parser.add_argument("""--alpha_mse""" , default=0.0 , type=__A , help="""Linear weight of the MSE loss. Must be >=0.""" ) parser.add_argument( """--alpha_cos""" , default=0.0 , type=__A , help="""Linear weight of the cosine embedding loss. Must be >=0.""" ) parser.add_argument( """--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" ) parser.add_argument( """--mlm_mask_prop""" , default=0.15 , type=__A , help="""Proportion of tokens for which we need to make a prediction.""" , ) parser.add_argument("""--word_mask""" , default=0.8 , type=__A , help="""Proportion of tokens to mask out.""" ) parser.add_argument("""--word_keep""" , default=0.1 , type=__A , help="""Proportion of tokens to keep.""" ) parser.add_argument("""--word_rand""" , default=0.1 , type=__A , help="""Proportion of tokens to randomly replace.""" ) parser.add_argument( """--mlm_smoothing""" , default=0.7 , type=__A , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , ) parser.add_argument("""--token_counts""" , type=__A , help="""The token counts in the data_file for MLM.""" ) parser.add_argument( """--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , ) parser.add_argument( """--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , ) parser.add_argument( """--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , ) parser.add_argument("""--n_epoch""" , type=__A , default=3 , help="""Number of pass on the whole dataset.""" ) parser.add_argument("""--batch_size""" , type=__A , default=5 , help="""Batch size (for each process).""" ) parser.add_argument( """--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. 
Default is true.""" , ) parser.add_argument( """--gradient_accumulation_steps""" , type=__A , default=50 , help="""Gradient accumulation for larger training batches.""" , ) parser.add_argument("""--warmup_prop""" , default=0.05 , type=__A , help="""Linear warmup proportion.""" ) parser.add_argument("""--weight_decay""" , default=0.0 , type=__A , help="""Weight decay if we apply some.""" ) parser.add_argument("""--learning_rate""" , default=5E-4 , type=__A , help="""The initial learning rate for Adam.""" ) parser.add_argument("""--adam_epsilon""" , default=1E-6 , type=__A , help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--max_grad_norm""" , default=5.0 , type=__A , help="""Max gradient norm.""" ) parser.add_argument("""--initializer_range""" , default=0.02 , type=__A , help="""Random initialization range.""" ) parser.add_argument( """--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , ) parser.add_argument( """--fp16_opt_level""" , type=__A , default="""O1""" , help=( """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].""" """See details at https://nvidia.github.io/apex/amp.html""" ) , ) parser.add_argument("""--n_gpu""" , type=__A , default=1 , help="""Number of GPUs in the node.""" ) parser.add_argument("""--local_rank""" , type=__A , default=-1 , help="""Distributed training - Local rank""" ) parser.add_argument("""--seed""" , type=__A , default=56 , help="""Random seed""" ) parser.add_argument("""--log_interval""" , type=__A , default=500 , help="""Tensorboard logging interval.""" ) parser.add_argument("""--checkpoint_interval""" , type=__A , default=4_000 , help="""Checkpoint interval.""" ) _lowerCamelCase : Dict = parser.parse_args() sanity_checks(__A ) # ARGS # init_gpu_params(__A ) set_seed(__A ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( F"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite""" """ itUse `--force` if you want to overwrite it""" ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(F"""Experiment will be dumped and logged in {args.dump_path}""" ) # SAVE PARAMS # logger.info(F"""Param: {args}""" ) with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f: json.dump(vars(__A ) , __A , indent=4 ) git_log(args.dump_path ) _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = MODEL_CLASSES[args.student_type] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = MODEL_CLASSES[args.teacher_type] # TOKENIZER # _lowerCamelCase : Dict = teacher_tokenizer_class.from_pretrained(args.teacher_name ) _lowerCamelCase : str = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): _lowerCamelCase : Union[str, Any] = tokenizer.all_special_tokens.index(__A ) _lowerCamelCase : Optional[int] = tokenizer.all_special_ids[idx] logger.info(F"""Special tokens {special_tok_ids}""" ) _lowerCamelCase : List[str] = special_tok_ids _lowerCamelCase : int = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(F"""Loading data from {args.data_file}""" ) with open(args.data_file , """rb""" ) as fp: _lowerCamelCase : Dict = pickle.load(__A ) if args.mlm: logger.info(F"""Loading token counts from {args.token_counts} (already pre-computed)""" ) with open(args.token_counts , """rb""" ) as fp: _lowerCamelCase : int = pickle.load(__A ) 
_lowerCamelCase : List[str] = np.maximum(__A , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): _lowerCamelCase : Tuple = 0.0 # do not predict special tokens _lowerCamelCase : Optional[Any] = torch.from_numpy(__A ) else: _lowerCamelCase : int = None _lowerCamelCase : Tuple = LmSeqsDataset(params=__A , data=__A ) logger.info("""Data loader created.""" ) # STUDENT # logger.info(F"""Loading student config from {args.student_config}""" ) _lowerCamelCase : Dict = student_config_class.from_pretrained(args.student_config ) _lowerCamelCase : Optional[int] = True if args.student_pretrained_weights is not None: logger.info(F"""Loading pretrained weights from {args.student_pretrained_weights}""" ) _lowerCamelCase : Any = student_model_class.from_pretrained(args.student_pretrained_weights , config=__A ) else: _lowerCamelCase : Dict = student_model_class(__A ) if args.n_gpu > 0: student.to(F"""cuda:{args.local_rank}""" ) logger.info("""Student loaded.""" ) # TEACHER # _lowerCamelCase : Optional[Any] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__A ) if args.n_gpu > 0: teacher.to(F"""cuda:{args.local_rank}""" ) logger.info(F"""Teacher loaded from {args.teacher_name}.""" ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(__A , __A ) if args.freeze_token_type_embds: freeze_token_type_embeddings(__A , __A ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() _lowerCamelCase : Optional[Any] = Distiller( params=__A , dataset=__A , token_probs=__A , student=__A , teacher=__A ) distiller.train() logger.info("""Let's go get some drinks.""" ) if __name__ == "__main__": main()
15
from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class __snake_case ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Dict , _UpperCamelCase : NestedDataStructureLike[PathLike] , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Optional[Features] = None , _UpperCamelCase : str = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : Optional[int] = None , **_UpperCamelCase : Tuple , ) ->Union[str, Any]: """simple docstring""" super().__init__( _UpperCamelCase , split=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase , streaming=_UpperCamelCase , num_proc=_UpperCamelCase , **_UpperCamelCase , ) _lowerCamelCase : List[Any] = path_or_paths if isinstance(_UpperCamelCase , _UpperCamelCase) else {self.split: path_or_paths} _lowerCamelCase : Any = Text( cache_dir=_UpperCamelCase , data_files=_UpperCamelCase , features=_UpperCamelCase , **_UpperCamelCase , ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]: """simple docstring""" if self.streaming: _lowerCamelCase : Tuple = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: _lowerCamelCase : List[Any] = None _lowerCamelCase : Any = None _lowerCamelCase : List[str] = None _lowerCamelCase : Dict = None self.builder.download_and_prepare( download_config=_UpperCamelCase , download_mode=_UpperCamelCase , verification_mode=_UpperCamelCase , base_path=_UpperCamelCase , num_proc=self.num_proc , ) _lowerCamelCase : Optional[int] = self.builder.as_dataset( split=self.split , verification_mode=_UpperCamelCase , in_memory=self.keep_in_memory) return dataset
15
1
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Returns the median of the merged contents of both input arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
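A non-interactive sketch of the median helper; the expected values follow from the divmod branches above.

# Sketch only: odd and even merged lengths.
assert median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0
assert median_of_two_arrays([1.0, 2.0], [3.0, 4.0]) == 2.5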
15
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sorts a sequence containing only 0s, 1s and 2s in a single pass."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
15
1
import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder lowerCAmelCase : List[Any] ="base_with_context" def A__ ( __A , __A ): '''simple docstring''' _lowerCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""] ) ) _lowerCamelCase : Tuple = nn.Parameter( torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=__A ) for lyr_num, lyr in enumerate(model.encoders ): _lowerCamelCase : List[str] = weights[F"""layers_{lyr_num}"""] _lowerCamelCase : Any = nn.Parameter( torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) ) _lowerCamelCase : List[str] = ly_weight["""attention"""] _lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) _lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) _lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) _lowerCamelCase : int = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) _lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) ) _lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) ) _lowerCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) ) _lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) ) _lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) ) return model def A__ ( __A , __A ): '''simple docstring''' _lowerCamelCase : int = nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T ) ) _lowerCamelCase : List[Any] = nn.Parameter( torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=__A ) for lyr_num, lyr in enumerate(model.encoders ): _lowerCamelCase : Optional[Any] = weights[F"""layers_{lyr_num}"""] _lowerCamelCase : Tuple = ly_weight["""attention"""] _lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) _lowerCamelCase : str = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) _lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) _lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) _lowerCamelCase : Optional[Any] = nn.Parameter( torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) ) _lowerCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) ) _lowerCamelCase : str = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) ) _lowerCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) ) _lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) ) _lowerCamelCase : List[Any] = 
nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) ) return model def A__ ( __A , __A ): '''simple docstring''' _lowerCamelCase : int = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense0"""]["""kernel"""].T ) ) _lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense1"""]["""kernel"""].T ) ) _lowerCamelCase : List[str] = nn.Parameter( torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=__A ) _lowerCamelCase : Union[str, Any] = nn.Parameter( torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T ) ) for lyr_num, lyr in enumerate(model.decoders ): _lowerCamelCase : int = weights[F"""layers_{lyr_num}"""] _lowerCamelCase : Tuple = nn.Parameter( torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) ) _lowerCamelCase : List[Any] = nn.Parameter( torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) ) _lowerCamelCase : Union[str, Any] = ly_weight["""self_attention"""] _lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) _lowerCamelCase : str = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) _lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) _lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) _lowerCamelCase : List[str] = ly_weight["""MultiHeadDotProductAttention_0"""] _lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) _lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) _lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) _lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) _lowerCamelCase : Union[str, Any] = nn.Parameter( torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) ) _lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) ) _lowerCamelCase : List[str] = nn.Parameter( torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) ) _lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) ) _lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) ) _lowerCamelCase : int = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) ) _lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) ) _lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) ) return model def A__ ( __A ): '''simple docstring''' _lowerCamelCase : List[Any] = checkpoints.load_tax_checkpoint(args.checkpoint_path ) _lowerCamelCase : Tuple = jnp.tree_util.tree_map(onp.array , __A ) _lowerCamelCase : int = [ """from __gin__ import dynamic_registration""", """from music_spectrogram_diffusion.models.diffusion import diffusion_utils""", """diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0""", """diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()""", ] _lowerCamelCase : Tuple = os.path.join(args.checkpoint_path , """..""" , 
"""config.gin""" ) _lowerCamelCase : Any = inference.parse_training_gin_file(__A , __A ) _lowerCamelCase : List[Any] = inference.InferenceModel(args.checkpoint_path , __A ) _lowerCamelCase : Optional[Any] = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" , variance_type="""fixed_large""" ) _lowerCamelCase : List[str] = SpectrogramNotesEncoder( max_length=synth_model.sequence_length["""inputs"""] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , ) _lowerCamelCase : int = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["""targets_context"""] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , ) _lowerCamelCase : Any = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["""targets_context"""] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) _lowerCamelCase : str = load_notes_encoder(ta_checkpoint["""target"""]["""token_encoder"""] , __A ) _lowerCamelCase : Any = load_continuous_encoder(ta_checkpoint["""target"""]["""continuous_encoder"""] , __A ) _lowerCamelCase : str = load_decoder(ta_checkpoint["""target"""]["""decoder"""] , __A ) _lowerCamelCase : Optional[int] = OnnxRuntimeModel.from_pretrained("""kashif/soundstream_mel_decoder""" ) _lowerCamelCase : Union[str, Any] = SpectrogramDiffusionPipeline( notes_encoder=__A , continuous_encoder=__A , decoder=__A , scheduler=__A , melgan=__A , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": lowerCAmelCase : str =argparse.ArgumentParser() parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.") parser.add_argument( "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not." ) parser.add_argument( "--checkpoint_path", default=F"""{MODEL}/checkpoint_500000""", type=str, required=False, help="Path to the original jax model checkpoint.", ) lowerCAmelCase : Tuple =parser.parse_args() main(args)
15
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Returns True when a queen can be placed at board[row][column]."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Places queens row by row, backtracking on conflicts."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
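A deterministic sketch of the solver on a 4x4 board. Note that the code stores references to the single mutable board, so the entries of solution end up zeroed after backtracking; the solution count is still meaningful.

# Sketch only: 4-queens has exactly two solutions.
solution.clear()
small_board = [[0 for _ in range(4)] for _ in range(4)]
solve(small_board, 0)
assert len(solution) == 2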
15
1
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
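A usage sketch, assuming the field names reconstructed above match the originals that the obfuscation erased:

# Sketch only: copy() deep-copies mutable fields.
cfg = DownloadConfig(max_retries=3, proxies={"https": "http://localhost:8080"})
dup = cfg.copy()
dup.proxies["https"] = "http://other:8080"
assert cfg.proxies["https"] == "http://localhost:8080"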
15
import argparse import os import torch from transformers import ( XLNetConfig, XLNetForQuestionAnswering, XLNetForSequenceClassification, XLNetLMHeadModel, load_tf_weights_in_xlnet, ) from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging lowerCAmelCase : int ={ "cola": 2, "mnli": 3, "mrpc": 2, "sst-2": 2, "sts-b": 1, "qqp": 2, "qnli": 2, "rte": 2, "wnli": 2, } logging.set_verbosity_info() def A__ ( __A , __A , __A , __A=None ): '''simple docstring''' # Initialise PyTorch model _lowerCamelCase : Tuple = XLNetConfig.from_json_file(__A ) _lowerCamelCase : List[Any] = finetuning_task.lower() if finetuning_task is not None else """""" if finetuning_task in GLUE_TASKS_NUM_LABELS: print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" ) _lowerCamelCase : int = finetuning_task _lowerCamelCase : Union[str, Any] = GLUE_TASKS_NUM_LABELS[finetuning_task] _lowerCamelCase : int = XLNetForSequenceClassification(__A ) elif "squad" in finetuning_task: _lowerCamelCase : Dict = finetuning_task _lowerCamelCase : Optional[Any] = XLNetForQuestionAnswering(__A ) else: _lowerCamelCase : Any = XLNetLMHeadModel(__A ) # Load weights from tf checkpoint load_tf_weights_in_xlnet(__A , __A , __A ) # Save pytorch-model _lowerCamelCase : Optional[Any] = os.path.join(__A , __A ) _lowerCamelCase : Any = os.path.join(__A , __A ) print(F"""Save PyTorch model to {os.path.abspath(__A )}""" ) torch.save(model.state_dict() , __A ) print(F"""Save configuration file to {os.path.abspath(__A )}""" ) with open(__A , """w""" , encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": lowerCAmelCase : Dict =argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--xlnet_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained XLNet model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the folder to store the PyTorch model or dataset/vocab.", ) parser.add_argument( "--finetuning_task", default=None, type=str, help="Name of a task on which the XLNet TensorFlow model was fine-tuned", ) lowerCAmelCase : Union[str, Any] =parser.parse_args() print(args) convert_xlnet_checkpoint_to_pytorch( args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task )
15
1
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"


class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertEqual(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("DDPM", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
15
def is_unique(input_str: str) -> bool:
    """Return True if every character in the string is distinct, using a bit mask."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
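A few quick sanity checks for the bit-mask approach above; this is a sketch that assumes the `is_unique` name restored in this cleanup:

# Sanity checks for the bit-mask uniqueness test (names as restored above).
assert is_unique("abcde") is True
assert is_unique("hello") is False  # 'l' appears twice
assert is_unique("") is True        # the empty string is vacuously unique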
15
1
import os
import string
import sys


ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
15
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
15
1
import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.testing_utils import require_tensorflow_text, require_tf, slow if is_tf_available(): import tensorflow as tf if is_tensorflow_text_available(): from transformers.models.bert import TFBertTokenizer lowerCAmelCase : int =["bert-base-uncased", "bert-base-cased"] lowerCAmelCase : List[str] ="hf-internal-testing/tiny-bert-tf-only" if is_tf_available(): class __snake_case ( tf.keras.Model ): '''simple docstring''' def __init__( self : List[Any] , _UpperCamelCase : Union[str, Any]) ->List[str]: """simple docstring""" super().__init__() _lowerCamelCase : Tuple = tokenizer _lowerCamelCase : str = AutoConfig.from_pretrained(_UpperCamelCase) _lowerCamelCase : List[str] = TFAutoModel.from_config(_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : List[Any]) ->Any: """simple docstring""" _lowerCamelCase : Optional[int] = self.tokenizer(_UpperCamelCase) _lowerCamelCase : Dict = self.bert(**_UpperCamelCase) return out["pooler_output"] @require_tf @require_tensorflow_text class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : int) ->Optional[Any]: """simple docstring""" super().setUp() _lowerCamelCase : int = [ BertTokenizer.from_pretrained(_UpperCamelCase) for checkpoint in (TOKENIZER_CHECKPOINTS * 2) ] # repeat for when fast_bert_tokenizer=false _lowerCamelCase : Tuple = [TFBertTokenizer.from_pretrained(_UpperCamelCase) for checkpoint in TOKENIZER_CHECKPOINTS] + [ TFBertTokenizer.from_pretrained(_UpperCamelCase , use_fast_bert_tokenizer=_UpperCamelCase) for checkpoint in TOKENIZER_CHECKPOINTS ] assert len(self.tokenizers) == len(self.tf_tokenizers) _lowerCamelCase : List[Any] = [ """This is a straightforward English test sentence.""", """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""", """Now we're going to add some Chinese: 一 二 三 一二三""", """And some much more rare Chinese: 齉 堃 齉堃""", """Je vais aussi écrire en français pour tester les accents""", """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""", ] _lowerCamelCase : Optional[Any] = list(zip(self.test_sentences , self.test_sentences[::-1])) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Tuple: """simple docstring""" for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers): for test_inputs in (self.test_sentences, self.paired_sentences): _lowerCamelCase : Optional[Any] = tokenizer(_UpperCamelCase , return_tensors="""tf""" , padding="""longest""") _lowerCamelCase : Tuple = tf_tokenizer(_UpperCamelCase) for key in python_outputs.keys(): self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape)) self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa) == tf_outputs[key])) @slow def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Union[str, Any]: """simple docstring""" for tf_tokenizer in self.tf_tokenizers: _lowerCamelCase : Any = tf_tokenizer(self.paired_sentences) _lowerCamelCase : Optional[int] = tf_tokenizer( text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , ) for key in merged_outputs.keys(): self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa) == separated_outputs[key])) @slow def _SCREAMING_SNAKE_CASE ( self : 
Optional[Any]) ->Tuple: """simple docstring""" for tf_tokenizer in self.tf_tokenizers: _lowerCamelCase : List[Any] = tf.function(_UpperCamelCase) for test_inputs in (self.test_sentences, self.paired_sentences): _lowerCamelCase : List[Any] = tf.constant(_UpperCamelCase) _lowerCamelCase : Optional[int] = compiled_tokenizer(_UpperCamelCase) _lowerCamelCase : Optional[Any] = tf_tokenizer(_UpperCamelCase) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key])) @slow def _SCREAMING_SNAKE_CASE ( self : str) ->Dict: """simple docstring""" for tf_tokenizer in self.tf_tokenizers: _lowerCamelCase : Dict = ModelToSave(tokenizer=_UpperCamelCase) _lowerCamelCase : int = tf.convert_to_tensor(self.test_sentences) _lowerCamelCase : List[str] = model(_UpperCamelCase) # Build model with some sample inputs with TemporaryDirectory() as tempdir: _lowerCamelCase : Any = Path(_UpperCamelCase) / """saved.model""" model.save(_UpperCamelCase) _lowerCamelCase : Union[str, Any] = tf.keras.models.load_model(_UpperCamelCase) _lowerCamelCase : Tuple = loaded_model(_UpperCamelCase) # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)) , 1E-5)
15
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase : Tuple =logging.get_logger(__name__) class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = ['pixel_values'] def __init__( self : Optional[Any] , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Union[int, float] = 1 / 255 , _UpperCamelCase : bool = True , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , **_UpperCamelCase : str , ) ->None: """simple docstring""" super().__init__(**_UpperCamelCase) _lowerCamelCase : Tuple = size if size is not None else {"""height""": 256, """width""": 256} _lowerCamelCase : Optional[Any] = get_size_dict(_UpperCamelCase) _lowerCamelCase : Any = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} _lowerCamelCase : Any = get_size_dict(_UpperCamelCase , param_name="""crop_size""") _lowerCamelCase : int = do_resize _lowerCamelCase : int = size _lowerCamelCase : Optional[int] = resample _lowerCamelCase : int = do_center_crop _lowerCamelCase : Optional[Any] = crop_size _lowerCamelCase : Union[str, Any] = do_rescale _lowerCamelCase : List[str] = rescale_factor _lowerCamelCase : List[Any] = do_normalize _lowerCamelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray: """simple docstring""" _lowerCamelCase : Dict = get_size_dict(_UpperCamelCase) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""") return resize( _UpperCamelCase , size=(size["""height"""], size["""width"""]) , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : List[str] , ) ->np.ndarray: """simple docstring""" _lowerCamelCase : Union[str, Any] = get_size_dict(_UpperCamelCase) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. 
Got {size.keys()}""") return center_crop(_UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[int, float] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->str: """simple docstring""" return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray: """simple docstring""" return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : ImageInput , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Tuple=None , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : bool = None , _UpperCamelCase : float = None , _UpperCamelCase : bool = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , _UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCamelCase : List[Any] , ) ->PIL.Image.Image: """simple docstring""" _lowerCamelCase : Any = do_resize if do_resize is not None else self.do_resize _lowerCamelCase : List[str] = resample if resample is not None else self.resample _lowerCamelCase : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop _lowerCamelCase : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale _lowerCamelCase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCamelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize _lowerCamelCase : int = image_mean if image_mean is not None else self.image_mean _lowerCamelCase : Dict = image_std if image_std is not None else self.image_std _lowerCamelCase : Optional[Any] = size if size is not None else self.size _lowerCamelCase : Optional[int] = get_size_dict(_UpperCamelCase) _lowerCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size _lowerCamelCase : Dict = get_size_dict(_UpperCamelCase , param_name="""crop_size""") _lowerCamelCase : int = make_list_of_images(_UpperCamelCase) if not valid_images(_UpperCamelCase): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""") if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""") if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""") if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""") if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""") # All transformations expect numpy arrays. 
_lowerCamelCase : Union[str, Any] = [to_numpy_array(_UpperCamelCase) for image in images] if do_resize: _lowerCamelCase : Any = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase) for image in images] if do_center_crop: _lowerCamelCase : str = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase) for image in images] if do_rescale: _lowerCamelCase : Optional[int] = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase) for image in images] if do_normalize: _lowerCamelCase : List[str] = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase) for image in images] _lowerCamelCase : List[str] = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase) for image in images] _lowerCamelCase : int = {"""pixel_values""": images} return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase)
15
1
from math import factorial


def combinations(n: int, k: int) -> int:
    """Return n choose k, the number of ways to pick k items from n."""
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
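A short worked check of the formula, assuming the `combinations` name restored above: C(52, 5) = 52·51·50·49·48 / 5! = 311875200 / 120 = 2598960.

# Worked checks against values computed by hand (names as restored above).
assert combinations(52, 5) == 2_598_960
assert combinations(40, 4) == 91_390
assert combinations(10, 3) == 120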
15
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for a single input sample."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the frequency response of a filter via its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the phase response of a filter via its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phase = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phase, -2 * pi))
    plt.show()
15
1
from typing import Optional, Union

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin


class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """Holds the mean and standard deviation used to normalize CLIP image embeddings."""

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
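`scale` and `unscale` above should be exact inverses of each other; a minimal roundtrip sketch, using the class and method names restored in this cleanup (shapes are illustrative):

# Roundtrip sketch: unscale(scale(x)) should recover x up to float error.
normalizer = StableUnCLIPImageNormalizer(embedding_dim=8)
x = torch.randn(2, 8)
assert torch.allclose(normalizer.unscale(normalizer.scale(x)), x, atol=1e-6)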
15
import argparse
from pathlib import Path

import torch
from packaging import version
from torch.onnx import export

from diffusers import AutoencoderKL


is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
15
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
15
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """Calculate the built-in voltage of a pn junction diode."""
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError("Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError("Acceptor concentration should be greater than intrinsic concentration")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
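In formula form, the value returned above is the built-in potential of a pn junction:

V_{bi} = \frac{k_B T}{q} \ln\!\left(\frac{N_D \, N_A}{n_i^{2}}\right)

where k_B is the Boltzmann constant, T = 300 K, q the elementary charge (dividing the joule-valued product by the "electron volt" constant converts it to electron-volts, numerically equal to volts here), and N_D, N_A, n_i the donor, acceptor, and intrinsic carrier concentrations.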
15
1
import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel lowerCAmelCase : Optional[Any] =logging.getLogger(__name__) def A__ ( __A , __A ): '''simple docstring''' # save results if os.path.exists(__A ): if os.path.exists(os.path.join(__A , """config.json""" ) ) and os.path.isfile( os.path.join(__A , """config.json""" ) ): os.remove(os.path.join(__A , """config.json""" ) ) if os.path.exists(os.path.join(__A , """pytorch_model.bin""" ) ) and os.path.isfile( os.path.join(__A , """pytorch_model.bin""" ) ): os.remove(os.path.join(__A , """pytorch_model.bin""" ) ) else: os.makedirs(__A ) model.save_pretrained(__A ) def A__ ( __A , __A=False ): '''simple docstring''' _lowerCamelCase : Optional[int] = 2 if unlogit: _lowerCamelCase : Union[str, Any] = torch.pow(__A , __A ) _lowerCamelCase : Tuple = p * torch.log(__A ) _lowerCamelCase : Union[str, Any] = 0 return -plogp.sum(dim=-1 ) def A__ ( __A ): '''simple docstring''' logger.info("""lv, h >\t""" + """\t""".join(F"""{x + 1}""" for x in range(len(__A ) ) ) ) for row in range(len(__A ) ): if tensor.dtype != torch.long: logger.info(F"""layer {row + 1}:\t""" + """\t""".join(F"""{x:.5f}""" for x in tensor[row].cpu().data ) ) else: logger.info(F"""layer {row + 1}:\t""" + """\t""".join(F"""{x:d}""" for x in tensor[row].cpu().data ) ) def A__ ( __A , __A , __A , __A=True , __A=True , __A=None , __A=False ): '''simple docstring''' _lowerCamelCase , _lowerCamelCase : Optional[int] = model.config.num_hidden_layers, model.config.num_attention_heads _lowerCamelCase : int = torch.zeros(__A , __A ).to(args.device ) _lowerCamelCase : Optional[Any] = torch.zeros(__A , __A ).to(args.device ) if head_mask is None: _lowerCamelCase : List[Any] = torch.ones(__A , __A ).to(args.device ) head_mask.requires_grad_(requires_grad=__A ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: _lowerCamelCase : str = None _lowerCamelCase : List[Any] = 0.0 _lowerCamelCase : Tuple = 0.0 for step, inputs in enumerate(tqdm(__A , desc="""Iteration""" , disable=args.local_rank not in [-1, 0] ) ): _lowerCamelCase : List[Any] = tuple(t.to(args.device ) for t in inputs ) ((_lowerCamelCase) , ) : Union[str, Any] = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) _lowerCamelCase : str = model(__A , labels=__A , head_mask=__A ) # (loss), lm_logits, presents, (all hidden_states), (attentions) _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[int] = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(__A ): _lowerCamelCase : Dict = entropy(attn.detach() , __A ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(__A ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: _lowerCamelCase : Union[str, Any] = 2 _lowerCamelCase : int = torch.pow(torch.pow(__A , __A ).sum(-1 ) , 1 / exponent ) head_importance /= 
norm_by_layer.unsqueeze(-1 ) + 1E-20 if not args.dont_normalize_global_importance: _lowerCamelCase : Tuple = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info("""Attention entropies""" ) print_ad_tensor(__A ) if compute_importance: logger.info("""Head importance scores""" ) print_ad_tensor(__A ) logger.info("""Head ranked by importance scores""" ) _lowerCamelCase : Dict = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) _lowerCamelCase : int = torch.arange( head_importance.numel() , device=args.device ) _lowerCamelCase : Dict = head_ranks.view_as(__A ) print_ad_tensor(__A ) return attn_entropy, head_importance, total_loss def A__ ( __A , __A , __A ): '''simple docstring''' _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = compute_heads_importance(__A , __A , __A , compute_entropy=__A ) _lowerCamelCase : Optional[int] = 1 / loss # instead of downsteam score use the LM loss logger.info("""Pruning: original score: %f, threshold: %f""" , __A , original_score * args.masking_threshold ) _lowerCamelCase : int = torch.ones_like(__A ) _lowerCamelCase : Any = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) _lowerCamelCase : Optional[Any] = original_score while current_score >= original_score * args.masking_threshold: _lowerCamelCase : List[Any] = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads _lowerCamelCase : int = float("""Inf""" ) _lowerCamelCase : Optional[int] = head_importance.view(-1 ).sort()[1] if len(__A ) <= num_to_mask: print("""BREAK BY num_to_mask""" ) break # mask heads _lowerCamelCase : str = current_heads_to_mask[:num_to_mask] logger.info("""Heads to mask: %s""" , str(current_heads_to_mask.tolist() ) ) _lowerCamelCase : Union[str, Any] = new_head_mask.view(-1 ) _lowerCamelCase : Optional[Any] = 0.0 _lowerCamelCase : Tuple = new_head_mask.view_as(__A ) _lowerCamelCase : Optional[Any] = new_head_mask.clone().detach() print_ad_tensor(__A ) # Compute metric and head importance again _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = compute_heads_importance( __A , __A , __A , compute_entropy=__A , head_mask=__A ) _lowerCamelCase : str = 1 / loss logger.info( """Masking: current score: %f, remaining heads %d (%.1f percents)""" , __A , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info("""Final head mask""" ) print_ad_tensor(__A ) np.save(os.path.join(args.output_dir , """head_mask.npy""" ) , head_mask.detach().cpu().numpy() ) return head_mask def A__ ( __A , __A , __A , __A ): '''simple docstring''' _lowerCamelCase : Optional[int] = datetime.now() _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[Any] = compute_heads_importance( __A , __A , __A , compute_entropy=__A , compute_importance=__A , head_mask=__A ) _lowerCamelCase : Optional[Any] = 1 / loss _lowerCamelCase : Tuple = datetime.now() - before_time _lowerCamelCase : Optional[Any] = sum(p.numel() for p in model.parameters() ) _lowerCamelCase : Any = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__A ) ) } for k, v in heads_to_prune.items(): if isinstance(__A , __A ): _lowerCamelCase : Any = [ v, ] assert sum(len(__A ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(__A ) _lowerCamelCase : Union[str, Any] = sum(p.numel() for p in model.parameters() ) _lowerCamelCase : Any = 
datetime.now() _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = compute_heads_importance( __A , __A , __A , compute_entropy=__A , compute_importance=__A , head_mask=__A , actually_pruned=__A , ) _lowerCamelCase : Dict = 1 / loss _lowerCamelCase : Tuple = datetime.now() - before_time logger.info( """Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)""" , __A , __A , pruned_num_params / original_num_params * 100 , ) logger.info("""Pruning: score with masking: %f score with pruning: %f""" , __A , __A ) logger.info("""Pruning: speed ratio (original timing / new timing): %f percents""" , original_time / new_time * 100 ) save_model(__A , args.output_dir ) def A__ ( ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--data_dir""" , default=__A , type=__A , required=__A , help="""The input data dir. Should contain the .tsv files (or other data files) for the task.""" , ) parser.add_argument( """--model_name_or_path""" , default=__A , type=__A , required=__A , help="""Path to pretrained model or model identifier from huggingface.co/models""" , ) parser.add_argument( """--output_dir""" , default=__A , type=__A , required=__A , help="""The output directory where the model predictions and checkpoints will be written.""" , ) # Other parameters parser.add_argument( """--config_name""" , default="""""" , type=__A , help="""Pretrained config name or path if not the same as model_name_or_path""" , ) parser.add_argument( """--tokenizer_name""" , default="""""" , type=__A , help="""Pretrained tokenizer name or path if not the same as model_name_or_path""" , ) parser.add_argument( """--cache_dir""" , default=__A , type=__A , help="""Where do you want to store the pre-trained models downloaded from s3""" , ) parser.add_argument( """--data_subset""" , type=__A , default=-1 , help="""If > 0: limit the data to a subset of data_subset instances.""" ) parser.add_argument( """--overwrite_output_dir""" , action="""store_true""" , help="""Whether to overwrite data in output directory""" ) parser.add_argument( """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" ) parser.add_argument( """--dont_normalize_importance_by_layer""" , action="""store_true""" , help="""Don't normalize importance score by layers""" ) parser.add_argument( """--dont_normalize_global_importance""" , action="""store_true""" , help="""Don't normalize all importance scores between 0 and 1""" , ) parser.add_argument( """--try_masking""" , action="""store_true""" , help="""Whether to try to mask head until a threshold of accuracy.""" ) parser.add_argument( """--masking_threshold""" , default=0.9 , type=__A , help="""masking threshold in term of metrics (stop masking when metric < threshold * original metric value).""" , ) parser.add_argument( """--masking_amount""" , default=0.1 , type=__A , help="""Amount to heads to masking at each masking step.""" ) parser.add_argument("""--metric_name""" , default="""acc""" , type=__A , help="""Metric to use for head masking.""" ) parser.add_argument( """--max_seq_length""" , default=128 , type=__A , help=( """The maximum total input sequence length after WordPiece tokenization. 
\n""" """Sequences longer than this will be truncated, sequences shorter padded.""" ) , ) parser.add_argument("""--batch_size""" , default=1 , type=__A , help="""Batch size.""" ) parser.add_argument("""--seed""" , type=__A , default=42 ) parser.add_argument("""--local_rank""" , type=__A , default=-1 , help="""local_rank for distributed training on gpus""" ) parser.add_argument("""--no_cuda""" , action="""store_true""" , help="""Whether not to use CUDA when available""" ) parser.add_argument("""--server_ip""" , type=__A , default="""""" , help="""Can be used for distant debugging.""" ) parser.add_argument("""--server_port""" , type=__A , default="""""" , help="""Can be used for distant debugging.""" ) _lowerCamelCase : Tuple = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("""Waiting for debugger attach""" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__A ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: _lowerCamelCase : Tuple = torch.device("""cuda""" if torch.cuda.is_available() and not args.no_cuda else """cpu""" ) _lowerCamelCase : Union[str, Any] = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) _lowerCamelCase : Union[str, Any] = torch.device("""cuda""" , args.local_rank ) _lowerCamelCase : Optional[int] = 1 torch.distributed.init_process_group(backend="""nccl""" ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info("""device: {} n_gpu: {}, distributed: {}""".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) _lowerCamelCase : Optional[Any] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: _lowerCamelCase : Optional[Any] = nn.parallel.DistributedDataParallel( __A , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__A ) elif args.n_gpu > 1: _lowerCamelCase : List[str] = nn.DataParallel(__A ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=__A ) torch.save(__A , os.path.join(args.output_dir , """run_args.bin""" ) ) logger.info("""Training/evaluation parameters %s""" , __A ) # Prepare dataset _lowerCamelCase : Union[str, Any] = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) _lowerCamelCase : Optional[int] = (torch.from_numpy(__A ),) _lowerCamelCase : int = TensorDataset(*__A ) _lowerCamelCase : str = RandomSampler(__A ) _lowerCamelCase : Dict = DataLoader(__A , sampler=__A , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(__A , __A , __A ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: _lowerCamelCase : str = mask_heads(__A , __A , __A ) prune_heads(__A , __A , __A , __A ) if __name__ == "__main__": main()
15
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
15
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]


if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
15
import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _snake_case = IFPipeline _snake_case = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'} _snake_case = TEXT_TO_IMAGE_BATCH_PARAMS _snake_case = PipelineTesterMixin.required_optional_params - {'latents'} def _SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]: """simple docstring""" return self._get_dummy_components() def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=0) ->Optional[Any]: """simple docstring""" if str(_UpperCamelCase).startswith("""mps"""): _lowerCamelCase : int = torch.manual_seed(_UpperCamelCase) else: _lowerCamelCase : List[Any] = torch.Generator(device=_UpperCamelCase).manual_seed(_UpperCamelCase) _lowerCamelCase : Dict = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""") def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1) def _SCREAMING_SNAKE_CASE ( self : int) ->Any: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2) def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]: """simple docstring""" self._test_save_load_local() def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3) @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]: """simple docstring""" _lowerCamelCase : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa) _lowerCamelCase : Tuple = IFSuperResolutionPipeline.from_pretrained( """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("""cuda""") _lowerCamelCase , _lowerCamelCase : str = 
pipe_a.encode_prompt("""anime turtle""" , device="""cuda""") del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() _lowerCamelCase : str = None _lowerCamelCase : str = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img _lowerCamelCase : Optional[Any] = IFImgaImgPipeline(**pipe_a.components) _lowerCamelCase : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if_imgaimg(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting _lowerCamelCase : Any = IFInpaintingPipeline(**pipe_a.components) _lowerCamelCase : Dict = IFInpaintingSuperResolutionPipeline(**pipe_a.components) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if_inpainting(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str) ->Tuple: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : Optional[int] = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Optional[Any] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Optional[int] = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 _lowerCamelCase : Dict = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : str = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : Any = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : str , _UpperCamelCase : List[Any]) ->Any: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Union[str, Any] = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Dict 
= pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Union[str, Any] = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _lowerCamelCase : List[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : List[str] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Optional[Any] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : List[Any] = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : str = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple) ->Optional[int]: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(1)).to(_UpperCamelCase) _lowerCamelCase : int = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Any = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Any = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _lowerCamelCase : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1)).to(_UpperCamelCase) _lowerCamelCase : List[str] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : Optional[Any] = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : Optional[Any] = 
torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def A__ ( ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
15
1
def factorial(num: int) -> int:
    """Find the factorial of a given number num."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split the number into digits and add them."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits in num!."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
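A small worked example under the names restored above: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so `solution(10)` should return 27.

# Worked example checks for the restored functions.
assert factorial(10) == 3_628_800
assert split_and_add(3_628_800) == 27
assert solution(10) == 27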
15
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: lowerCAmelCase : Any =None lowerCAmelCase : Tuple =logging.get_logger(__name__) lowerCAmelCase : Tuple ="▁" lowerCAmelCase : str ={"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} lowerCAmelCase : int ={ "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}, "tokenizer_file": { "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json" }, } lowerCAmelCase : Tuple ={ "google/pegasus-xsum": 512, } class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = VOCAB_FILES_NAMES _snake_case = PRETRAINED_VOCAB_FILES_MAP _snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case = PegasusTokenizer _snake_case = ['input_ids', 'attention_mask'] def __init__( self : Tuple , _UpperCamelCase : int=None , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : List[str]="<pad>" , _UpperCamelCase : Tuple="</s>" , _UpperCamelCase : int="<unk>" , _UpperCamelCase : Any="<mask_2>" , _UpperCamelCase : Optional[int]="<mask_1>" , _UpperCamelCase : int=None , _UpperCamelCase : Union[str, Any]=103 , **_UpperCamelCase : Tuple , ) ->Optional[int]: """simple docstring""" _lowerCamelCase : List[str] = offset if additional_special_tokens is not None: if not isinstance(_UpperCamelCase , _UpperCamelCase): raise TypeError( F"""additional_special_tokens should be of type {type(_UpperCamelCase)}, but is""" F""" {type(_UpperCamelCase)}""") _lowerCamelCase : Any = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ F"""<unk_{i}>""" for i in range(len(_UpperCamelCase) , self.offset - 1) ] if len(set(_UpperCamelCase)) != len(_UpperCamelCase): raise ValueError( """Please make sure that the provided additional_special_tokens do not contain an incorrectly""" F""" shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.""") _lowerCamelCase : str = additional_special_tokens_extended else: _lowerCamelCase : List[str] = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [F"""<unk_{i}>""" for i in range(2 , self.offset)] super().__init__( _UpperCamelCase , tokenizer_file=_UpperCamelCase , pad_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , mask_token=_UpperCamelCase , mask_token_sent=_UpperCamelCase , offset=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , **_UpperCamelCase , ) _lowerCamelCase : str = vocab_file _lowerCamelCase : Any = False if not self.vocab_file else True def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : Any) ->List[str]: """simple docstring""" _lowerCamelCase : Dict = set(self.all_special_ids) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens) + 3)): raise ValueError( """There should be 3 special tokens: mask_token, pad_token, and eos_token +""" F""" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}""") return [1 if x in all_special_ids else 0 for x in seq] def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : List , _UpperCamelCase : Optional[List] = None , _UpperCamelCase : bool = False) ->List[int]: """simple docstring""" if already_has_special_tokens: return self._special_token_mask(_UpperCamelCase) elif token_ids_a is None: return self._special_token_mask(_UpperCamelCase) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a) + [1] def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple=None) ->List[int]: """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None) ->Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""") if not os.path.isdir(_UpperCamelCase): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""") return _lowerCamelCase : List[str] = os.path.join( _UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]) if os.path.abspath(self.vocab_file) != os.path.abspath(_UpperCamelCase): copyfile(self.vocab_file , _UpperCamelCase) return (out_vocab_file,)
import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = (EulerDiscreteScheduler,) _snake_case = 10 def _SCREAMING_SNAKE_CASE ( self : Tuple , **_UpperCamelCase : Optional[Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Optional[int] = { """num_train_timesteps""": 1100, """beta_start""": 0.0_0_0_1, """beta_end""": 0.0_2, """beta_schedule""": """linear""", } config.update(**_UpperCamelCase) return config def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict: """simple docstring""" for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]): self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : List[Any] = self.scheduler_classes[0] _lowerCamelCase : str = self.get_scheduler_config() _lowerCamelCase : Any = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps) _lowerCamelCase : str = torch.manual_seed(0) _lowerCamelCase : str = self.dummy_model() _lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma _lowerCamelCase : int = sample.to(_UpperCamelCase) for i, t in enumerate(scheduler.timesteps): _lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : str = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : Dict = output.prev_sample _lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : Any = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2 assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any: """simple docstring""" _lowerCamelCase : int = self.scheduler_classes[0] _lowerCamelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""") _lowerCamelCase : int = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps) _lowerCamelCase : Any = torch.manual_seed(0) _lowerCamelCase : int = self.dummy_model() _lowerCamelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma _lowerCamelCase : Dict = sample.to(_UpperCamelCase) for i, t in enumerate(scheduler.timesteps): _lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : str = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[Any] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : Tuple = output.prev_sample _lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase 
: Optional[int] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 0.0_0_0_2) < 1E-2 assert abs(result_mean.item() - 2.2_6_7_6E-0_6) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.scheduler_classes[0] _lowerCamelCase : int = self.get_scheduler_config() _lowerCamelCase : List[Any] = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase) _lowerCamelCase : Optional[Any] = torch.manual_seed(0) _lowerCamelCase : Tuple = self.dummy_model() _lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _lowerCamelCase : Tuple = sample.to(_UpperCamelCase) for t in scheduler.timesteps: _lowerCamelCase : List[Any] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Any = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : List[Any] = output.prev_sample _lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : List[Any] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2 assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple: """simple docstring""" _lowerCamelCase : List[str] = self.scheduler_classes[0] _lowerCamelCase : Optional[int] = self.get_scheduler_config() _lowerCamelCase : int = scheduler_class(**_UpperCamelCase , use_karras_sigmas=_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase) _lowerCamelCase : int = torch.manual_seed(0) _lowerCamelCase : Tuple = self.dummy_model() _lowerCamelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _lowerCamelCase : Optional[int] = sample.to(_UpperCamelCase) for t in scheduler.timesteps: _lowerCamelCase : Tuple = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Any = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : int = output.prev_sample _lowerCamelCase : Tuple = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : List[str] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9) < 1E-2 assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3) < 1E-3
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class __snake_case ( unittest.TestCase ): '''simple docstring''' _snake_case = ViTImageProcessor if is_vision_available() else None @property def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = (3, 32, 128) _lowerCamelCase : str = tempfile.mkdtemp() # fmt: off _lowerCamelCase : Dict = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on _lowerCamelCase : str = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase)))) _lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp: fp.write(json.dumps(_UpperCamelCase) + """\n""") _lowerCamelCase : Any = { """do_normalize""": False, """do_resize""": True, """image_processor_type""": """ViTImageProcessor""", """resample""": 3, """size""": {"""height""": 32, """width""": 128}, } _lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , _UpperCamelCase) with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp: json.dump(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **_UpperCamelCase : Any) ->Tuple: """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Dict , **_UpperCamelCase : Optional[Any]) ->List[Any]: """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: """simple docstring""" shutil.rmtree(self.tmpdirname) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: """simple docstring""" _lowerCamelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta) _lowerCamelCase : int = Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1)) return image_input def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" _lowerCamelCase : List[str] = self.get_tokenizer() _lowerCamelCase : Tuple = self.get_image_processor() _lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) processor.save_pretrained(self.tmpdirname) _lowerCamelCase : int = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCamelCase) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab()) self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string()) 
self.assertIsInstance(processor.image_processor , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict: """simple docstring""" _lowerCamelCase : Dict = self.get_tokenizer() _lowerCamelCase : Optional[Any] = self.get_image_processor() _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) processor.save_pretrained(self.tmpdirname) _lowerCamelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""") _lowerCamelCase : Union[str, Any] = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0) _lowerCamelCase : Tuple = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCamelCase , padding_value=1.0) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any) ->int: """simple docstring""" _lowerCamelCase : int = self.get_image_processor() _lowerCamelCase : int = self.get_tokenizer() _lowerCamelCase : List[str] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : List[str] = self.prepare_image_inputs() _lowerCamelCase : Optional[int] = image_processor(_UpperCamelCase , return_tensors="""np""") _lowerCamelCase : int = processor(images=_UpperCamelCase , return_tensors="""np""") for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]: """simple docstring""" _lowerCamelCase : List[Any] = self.get_image_processor() _lowerCamelCase : int = self.get_tokenizer() _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : Optional[int] = """test""" _lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase) _lowerCamelCase : Dict = tokenizer(_UpperCamelCase) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.get_image_processor() _lowerCamelCase : List[Any] = self.get_tokenizer() _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : Any = """test""" _lowerCamelCase : List[str] = self.prepare_image_inputs() _lowerCamelCase : int = processor(text=_UpperCamelCase , images=_UpperCamelCase) self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""]) # test if it raises when no input is passed with pytest.raises(_UpperCamelCase): processor() def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.get_image_processor() _lowerCamelCase : List[str] = self.get_tokenizer() _lowerCamelCase : Dict = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] _lowerCamelCase : Any = processor.char_decode(_UpperCamelCase) _lowerCamelCase : Tuple = tokenizer.batch_decode(_UpperCamelCase) _lowerCamelCase : List[str] = [seq.replace(""" """ , """""") for seq in decoded_tok] 
self.assertListEqual(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str: """simple docstring""" _lowerCamelCase : Dict = self.get_image_processor() _lowerCamelCase : str = self.get_tokenizer() _lowerCamelCase : List[Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : int = None _lowerCamelCase : Union[str, Any] = self.prepare_image_inputs() _lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase , images=_UpperCamelCase) self.assertListEqual(list(inputs.keys()) , processor.model_input_names) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : List[str] = self.get_image_processor() _lowerCamelCase : int = self.get_tokenizer() _lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : Any = torch.randn(1 , 27 , 38) _lowerCamelCase : List[Any] = torch.randn(1 , 27 , 5_0257) _lowerCamelCase : List[str] = torch.randn(1 , 27 , 3_0522) _lowerCamelCase : int = processor.batch_decode([char_input, bpe_input, wp_input]) self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # character-level tokenization
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
import argparse
from pathlib import Path

import torch
from packaging import version
from torch.onnx import export

from diffusers import AutoencoderKL


is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
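# Example invocation (illustrative; assumes this script is saved as
# convert_vae_to_onnx.py, a file name that is not part of the original source):
#
#   python convert_vae_to_onnx.py --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./onnx_vae --opset 14
#
# On success the decoder graph is written to ./onnx_vae/vae_decoder/model.onnx.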
import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
import warnings

from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor


logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import math


def is_prime(number: int) -> bool:
    """Return True if the given number is prime, else False."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the first prime at or after factor * value (strictly after, if
    the starting point is itself prime). Pass desc=True to search downward."""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
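# Illustrative usage (not part of the original file):
# >>> next_prime(14)
# 17
# >>> next_prime(13)  # 13 is itself prime, so the next prime above it is returned
# 17
# >>> next_prime(20, desc=True)  # search downward instead
# 19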
from scipy.stats import pearsonr import datasets lowerCAmelCase : Any ="\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n" lowerCAmelCase : List[str] ="\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results['pearsonr'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n ['p-value', 'pearsonr']\n >>> print(round(results['pearsonr'], 2))\n -0.74\n >>> print(round(results['p-value'], 2))\n 0.15\n" lowerCAmelCase : Union[str, Any] ="\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. 
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __snake_case ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : int) ->List[str]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""float"""), """references""": datasets.Value("""float"""), }) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , ) def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : str , _UpperCamelCase : Tuple , _UpperCamelCase : List[str]=False) ->Optional[Any]: """simple docstring""" if return_pvalue: _lowerCamelCase : Union[str, Any] = pearsonr(_UpperCamelCase , _UpperCamelCase) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(_UpperCamelCase , _UpperCamelCase)[0])}
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
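# Minimal usage sketch (not part of the original file; assumes the public
# `transformers.pipeline` factory):
#
#   from transformers import pipeline
#   classifier = pipeline("image-classification")
#   classifier("path/to/image.png", top_k=3)
#   # -> list of {"score": ..., "label": ...} dicts for the 3 best labels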
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of `function` with the secant method, starting from x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n) == function(x_n1):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
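# Illustrative check (not part of the original file): the real root of
# x**3 - 2*x - 5 is approximately 2.0945514815, so intersection(f, 3, 3.5)
# returns a value within the 1e-5 stopping tolerance of that root.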
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = "\nimport os\n"

IMPORT_IN_FUNCTION = "\ndef foo():\n    import os\n    return False\n"

DEEPLY_NESTED_IMPORT = "\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n"

TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"

TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n"

MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n"

EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n"

GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n"

MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n"

MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n"

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """Helper function parsing the command line options."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
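# Example invocation (illustrative; assumes this launcher is saved as
# xla_spawn.py and that the target script exposes an `_mp_fn(index)` entry point):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased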
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for an input sample."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the magnitude response of a filter from its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the phase response of a filter from its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
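# Minimal sketch (not part of the original file): any object with a
# `process(sample) -> float` method satisfies the FilterType protocol.
# A pass-through filter turns the impulse input into an impulse output,
# so its magnitude response plots as a flat line at 0 dB.
class IdentityFilter:
    def process(self, sample: float) -> float:
        return sample


# show_frequency_response(IdentityFilter(), 48000)
# show_phase_response(IdentityFilter(), 48000)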
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Return the number of possible binary search trees for node_count nodes."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Return the number of possible binary trees for node_count nodes."""
    return catalan_number(node_count) * factorial(node_count)


if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
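# Illustrative check (not part of the original file): for node_count = 5,
# catalan_number(5) == 42 and factorial(5) == 120, so there are 42 binary
# search trees and 42 * 120 == 5040 distinct binary trees.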
from __future__ import annotations


class XORCipher:
    def __init__(self, key: int = 0):
        """Simple constructor that receives a key, or uses the default key = 0."""
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """Encrypt `content` with `key`, returning the result as a list of chars.
        If no usable key is passed, the constructor's key (or 1) is used instead."""
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        """Decrypt `content` with `key`, returning the result as a list of chars.
        If no usable key is passed, the constructor's key (or 1) is used instead."""
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        """Encrypt `content` with `key`, returning the result as a string."""
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        """Decrypt `content` with `key`, returning the result as a string."""
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        """Encrypt the contents of `file` into 'encrypt.out'; True on success."""
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        """Decrypt the contents of `file` into 'decrypt.out'; True on success."""
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False

        return True


# Tests
# crypt = XORCipher()
# key = 67

# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))

# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))

# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))

# if (crypt.encrypt_file("test.txt",key)):
#     print("encrypt successful")
# else:
#     print("encrypt unsuccessful")

# if (crypt.decrypt_file("encrypt.out",key)):
#     print("decrypt successful")
# else:
#     print("decrypt unsuccessful")
15
1
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
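# Illustrative programmatic use of the converter above (not part of the original
# script); "model.pt" and "./mbart-dump" are placeholder paths.
model = convert_fairseq_mbart_checkpoint_from_disk(
    "model.pt",
    hf_config_path="facebook/mbart-large-cc25",
)
model.save_pretrained("./mbart-dump")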
15
from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class __snake_case ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Dict , _UpperCamelCase : NestedDataStructureLike[PathLike] , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Optional[Features] = None , _UpperCamelCase : str = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : Optional[int] = None , **_UpperCamelCase : Tuple , ) ->Union[str, Any]: """simple docstring""" super().__init__( _UpperCamelCase , split=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase , streaming=_UpperCamelCase , num_proc=_UpperCamelCase , **_UpperCamelCase , ) _lowerCamelCase : List[Any] = path_or_paths if isinstance(_UpperCamelCase , _UpperCamelCase) else {self.split: path_or_paths} _lowerCamelCase : Any = Text( cache_dir=_UpperCamelCase , data_files=_UpperCamelCase , features=_UpperCamelCase , **_UpperCamelCase , ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]: """simple docstring""" if self.streaming: _lowerCamelCase : Tuple = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: _lowerCamelCase : List[Any] = None _lowerCamelCase : Any = None _lowerCamelCase : List[str] = None _lowerCamelCase : Dict = None self.builder.download_and_prepare( download_config=_UpperCamelCase , download_mode=_UpperCamelCase , verification_mode=_UpperCamelCase , base_path=_UpperCamelCase , num_proc=self.num_proc , ) _lowerCamelCase : Optional[int] = self.builder.as_dataset( split=self.split , verification_mode=_UpperCamelCase , in_memory=self.keep_in_memory) return dataset
15
1
import socket


def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12_312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1_024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
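# Illustrative counterpart (not part of the original file): a one-shot server
# the client above could talk to. The filename "File_to_send" is an assumption.
import socket


def serve_file(filename="File_to_send", port=12_312):
    # Accept a single client, consume its greeting, then stream the file.
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, addr = server.accept()
    print(f"Connection from {addr}")
    conn.recv(1_024)  # consume the client's "Hello server!" greeting
    with open(filename, "rb") as f:
        while chunk := f.read(1_024):
            conn.send(chunk)
    conn.close()  # closing signals EOF, which ends the client's recv loop
    server.close()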
15
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence):
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
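# Illustrative checks (not part of the original file) for the three-way
# partition above; values outside {0, 1, 2} raise ValueError.
assert dutch_national_flag_sort([2, 0, 1, 2, 0, 1]) == [0, 0, 1, 1, 2, 2]
assert dutch_national_flag_sort([]) == []
assert dutch_national_flag_sort([1]) == [1]

try:
    dutch_national_flag_sort([3])
except ValueError:
    pass  # 3 is not one of the flag colors (0, 1, 2)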
15
1
from __future__ import annotations

__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "contact@muhammadumerfarooq.me"
__status__ = "Alpha"

import re
from html.parser import HTMLParser
from urllib import parse

import requests


class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined and is neither empty nor '#', process it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
15
from __future__ import annotations

solution = []


def is_safe(board, row, column):
    # Check the row and the column.
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # Check the two upper diagonals.
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board, row):
    if row >= len(board):
        # Store a snapshot: `board` keeps being mutated while backtracking.
        solution.append([list(r) for r in board])
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board):
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions is:", len(solution))
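# Illustrative check (not part of the original file): the 4x4 board has exactly
# two solutions. `solution` is module-level state, so it is cleared first.
solution.clear()
small_board = [[0 for _ in range(4)] for _ in range(4)]
solve(small_board, 0)
assert len(solution) == 2  # the classic 4-queens puzzle has exactly 2 solutions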
15
1
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType lowerCAmelCase : int =logging.get_logger(__name__) lowerCAmelCase : Any ={ "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json", "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json", "microsoft/deberta-v2-xlarge-mnli": ( "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json" ), "microsoft/deberta-v2-xxlarge-mnli": ( "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json" ), } class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = 'deberta-v2' def __init__( self : int , _UpperCamelCase : Tuple=12_8100 , _UpperCamelCase : int=1536 , _UpperCamelCase : Union[str, Any]=24 , _UpperCamelCase : Union[str, Any]=24 , _UpperCamelCase : Optional[Any]=6144 , _UpperCamelCase : int="gelu" , _UpperCamelCase : Any=0.1 , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : Dict=512 , _UpperCamelCase : Union[str, Any]=0 , _UpperCamelCase : Optional[Any]=0.0_2 , _UpperCamelCase : Optional[int]=1E-7 , _UpperCamelCase : str=False , _UpperCamelCase : Tuple=-1 , _UpperCamelCase : Tuple=0 , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : Dict=None , _UpperCamelCase : Optional[int]=0 , _UpperCamelCase : int="gelu" , **_UpperCamelCase : int , ) ->Any: """simple docstring""" super().__init__(**_UpperCamelCase) _lowerCamelCase : Dict = hidden_size _lowerCamelCase : str = num_hidden_layers _lowerCamelCase : Dict = num_attention_heads _lowerCamelCase : List[str] = intermediate_size _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : List[str] = hidden_dropout_prob _lowerCamelCase : int = attention_probs_dropout_prob _lowerCamelCase : Any = max_position_embeddings _lowerCamelCase : Any = type_vocab_size _lowerCamelCase : Optional[Any] = initializer_range _lowerCamelCase : Union[str, Any] = relative_attention _lowerCamelCase : int = max_relative_positions _lowerCamelCase : str = pad_token_id _lowerCamelCase : int = position_biased_input # Backwards compatibility if type(_UpperCamelCase) == str: _lowerCamelCase : List[str] = [x.strip() for x in pos_att_type.lower().split("""|""")] _lowerCamelCase : List[Any] = pos_att_type _lowerCamelCase : Dict = vocab_size _lowerCamelCase : Optional[int] = layer_norm_eps _lowerCamelCase : int = kwargs.get("""pooler_hidden_size""" , _UpperCamelCase) _lowerCamelCase : str = pooler_dropout _lowerCamelCase : Optional[Any] = pooler_hidden_act class __snake_case ( __lowerCAmelCase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : str) ->Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": _lowerCamelCase : Optional[int] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _lowerCamelCase : int = {0: """batch""", 1: """sequence"""} if self._config.type_vocab_size > 0: return OrderedDict( [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)]) else: return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)]) @property def _SCREAMING_SNAKE_CASE ( self : str) ->int: """simple docstring""" return 12 def _SCREAMING_SNAKE_CASE ( self : 
Optional[Any] , _UpperCamelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , _UpperCamelCase : int = -1 , _UpperCamelCase : int = -1 , _UpperCamelCase : int = -1 , _UpperCamelCase : bool = False , _UpperCamelCase : Optional["TensorType"] = None , _UpperCamelCase : int = 3 , _UpperCamelCase : int = 40 , _UpperCamelCase : int = 40 , _UpperCamelCase : "PreTrainedTokenizerBase" = None , ) ->Mapping[str, Any]: """simple docstring""" _lowerCamelCase : Any = super().generate_dummy_inputs(preprocessor=_UpperCamelCase , framework=_UpperCamelCase) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
15
import argparse import os import torch from transformers import ( XLNetConfig, XLNetForQuestionAnswering, XLNetForSequenceClassification, XLNetLMHeadModel, load_tf_weights_in_xlnet, ) from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging lowerCAmelCase : int ={ "cola": 2, "mnli": 3, "mrpc": 2, "sst-2": 2, "sts-b": 1, "qqp": 2, "qnli": 2, "rte": 2, "wnli": 2, } logging.set_verbosity_info() def A__ ( __A , __A , __A , __A=None ): '''simple docstring''' # Initialise PyTorch model _lowerCamelCase : Tuple = XLNetConfig.from_json_file(__A ) _lowerCamelCase : List[Any] = finetuning_task.lower() if finetuning_task is not None else """""" if finetuning_task in GLUE_TASKS_NUM_LABELS: print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" ) _lowerCamelCase : int = finetuning_task _lowerCamelCase : Union[str, Any] = GLUE_TASKS_NUM_LABELS[finetuning_task] _lowerCamelCase : int = XLNetForSequenceClassification(__A ) elif "squad" in finetuning_task: _lowerCamelCase : Dict = finetuning_task _lowerCamelCase : Optional[Any] = XLNetForQuestionAnswering(__A ) else: _lowerCamelCase : Any = XLNetLMHeadModel(__A ) # Load weights from tf checkpoint load_tf_weights_in_xlnet(__A , __A , __A ) # Save pytorch-model _lowerCamelCase : Optional[Any] = os.path.join(__A , __A ) _lowerCamelCase : Any = os.path.join(__A , __A ) print(F"""Save PyTorch model to {os.path.abspath(__A )}""" ) torch.save(model.state_dict() , __A ) print(F"""Save configuration file to {os.path.abspath(__A )}""" ) with open(__A , """w""" , encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": lowerCAmelCase : Dict =argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--xlnet_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained XLNet model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the folder to store the PyTorch model or dataset/vocab.", ) parser.add_argument( "--finetuning_task", default=None, type=str, help="Name of a task on which the XLNet TensorFlow model was fine-tuned", ) lowerCAmelCase : Union[str, Any] =parser.parse_args() print(args) convert_xlnet_checkpoint_to_pytorch( args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task )
15
1
import qiskit


def quantum_entanglement(qubits=2):
    classical_bits = qubits

    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)

    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)

    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {quantum_entanglement(3)}")
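# Illustrative check (not part of the original file): a GHZ-type circuit should
# only ever collapse to the all-zeros or all-ones string. Assumes a qiskit
# install that still provides qiskit.Aer and qiskit.execute.
counts = quantum_entanglement(3)

# Only the two fully correlated basis states appear in noise-free simulation.
assert set(counts) <= {"000", "111"}
assert sum(counts.values()) == 1_000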
15
def is_contains_unique_chars(input_str):
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
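# Illustrative checks (not part of the original file): the bitmap treats each
# code point as a bit index in one arbitrary-precision integer, so a duplicate
# is detected with a single shift-and-mask.
assert is_contains_unique_chars("abcdef") is True
assert is_contains_unique_chars("hello") is False  # 'l' repeats

# Each character maps to bit ord(ch); 'b' (98) sets bit 98 of the bitmap.
assert (1 << ord("b")) == pow(2, ord("b"))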
15
1
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def A__ ( __A , __A=False ): '''simple docstring''' try: _lowerCamelCase : List[str] = os.environ[key] except KeyError: # KEY isn't set, default to `default`. _lowerCamelCase : Optional[int] = default else: # KEY is set, convert it to True or False. try: _lowerCamelCase : List[str] = strtobool(__A ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"""If set, {key} must be yes or no.""" ) return _value lowerCAmelCase : str =parse_flag_from_env("RUN_SLOW", default=False) def A__ ( __A ): '''simple docstring''' return unittest.skip("""Test was skipped""" )(__A ) def A__ ( __A ): '''simple docstring''' return unittest.skipUnless(_run_slow_tests , """test is slow""" )(__A ) def A__ ( __A ): '''simple docstring''' return unittest.skipUnless(not torch.cuda.is_available() , """test requires only a CPU""" )(__A ) def A__ ( __A ): '''simple docstring''' return unittest.skipUnless(torch.cuda.is_available() , """test requires a GPU""" )(__A ) def A__ ( __A ): '''simple docstring''' return unittest.skipUnless(is_xpu_available() , """test requires a XPU""" )(__A ) def A__ ( __A ): '''simple docstring''' return unittest.skipUnless(is_mps_available() , """test requires a `mps` backend support in `torch`""" )(__A ) def A__ ( __A ): '''simple docstring''' return unittest.skipUnless( is_transformers_available() and is_datasets_available() , """test requires the Hugging Face suite""" )(__A ) def A__ ( __A ): '''simple docstring''' return unittest.skipUnless(is_bnb_available() , """test requires the bitsandbytes library""" )(__A ) def A__ ( __A ): '''simple docstring''' return unittest.skipUnless(is_tpu_available() , """test requires TPU""" )(__A ) def A__ ( __A ): '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() == 1 , """test requires a GPU""" )(__A ) def A__ ( __A ): '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() == 1 , """test requires a XPU""" )(__A ) def A__ ( __A ): '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() > 1 , """test requires multiple GPUs""" )(__A ) def A__ ( __A ): '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() > 1 , """test requires multiple XPUs""" )(__A ) def A__ ( __A ): '''simple docstring''' return unittest.skipUnless(is_safetensors_available() , """test requires safetensors""" )(__A ) def A__ ( __A ): '''simple docstring''' return unittest.skipUnless(is_deepspeed_available() , """test requires DeepSpeed""" )(__A ) def A__ ( __A ): '''simple docstring''' return unittest.skipUnless(is_torch_version(""">=""" , """1.12.0""" ) , """test requires torch version >= 1.12.0""" )(__A ) def A__ ( __A=None , __A=None ): '''simple docstring''' if test_case is None: return partial(__A , version=__A ) return unittest.skipUnless(is_torch_version(""">=""" , __A ) , F"""test requires torch version >= {version}""" )(__A ) def A__ ( __A ): '''simple 
docstring''' return unittest.skipUnless(is_tensorboard_available() , """test requires Tensorboard""" )(__A ) def A__ ( __A ): '''simple docstring''' return unittest.skipUnless(is_wandb_available() , """test requires wandb""" )(__A ) def A__ ( __A ): '''simple docstring''' return unittest.skipUnless(is_comet_ml_available() , """test requires comet_ml""" )(__A ) lowerCAmelCase : Union[str, Any] =( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def A__ ( __A ): '''simple docstring''' return unittest.skipUnless( _atleast_one_tracker_available , """test requires at least one tracker to be available and for `comet_ml` to not be installed""" , )(__A ) class __snake_case ( unittest.TestCase ): '''simple docstring''' _snake_case = True @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[Any]) ->List[Any]: """simple docstring""" _lowerCamelCase : Optional[int] = tempfile.mkdtemp() @classmethod def _SCREAMING_SNAKE_CASE ( cls : int) ->List[Any]: """simple docstring""" if os.path.exists(cls.tmpdir): shutil.rmtree(cls.tmpdir) def _SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]: """simple docstring""" if self.clear_on_setup: for path in Path(self.tmpdir).glob("""**/*"""): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(_UpperCamelCase) class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : int) ->Any: """simple docstring""" super().tearDown() # Reset the state of the AcceleratorState singleton. AcceleratorState._reset_state() PartialState._reset_state() class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : Union[mock.Mock, List[mock.Mock]]) ->List[str]: """simple docstring""" _lowerCamelCase : Optional[Any] = mocks if isinstance(_UpperCamelCase , (tuple, list)) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop) def A__ ( __A ): '''simple docstring''' _lowerCamelCase : Tuple = AcceleratorState() _lowerCamelCase : Tuple = tensor[None].clone().to(state.device ) _lowerCamelCase : int = gather(__A ).cpu() _lowerCamelCase : Optional[int] = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , __A ): return False return True class __snake_case : '''simple docstring''' def __init__( self : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple) ->List[str]: """simple docstring""" _lowerCamelCase : List[str] = returncode _lowerCamelCase : Union[str, Any] = stdout _lowerCamelCase : List[Any] = stderr async def A__ ( __A , __A ): '''simple docstring''' while True: _lowerCamelCase : List[str] = await stream.readline() if line: callback(__A ) else: break async def A__ ( __A , __A=None , __A=None , __A=None , __A=False , __A=False ): '''simple docstring''' if echo: print("""\nRunning: """ , """ """.join(__A ) ) _lowerCamelCase : Dict = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=__A , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__A , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) _lowerCamelCase : int = [] _lowerCamelCase : int = [] def tee(__A , __A , __A , __A="" ): _lowerCamelCase : Dict = line.decode("""utf-8""" ).rstrip() sink.append(__A ) if not quiet: print(__A , __A , file=__A ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda __A : tee(__A , __A , sys.stdout , label="""stdout:""" ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda __A : tee(__A , __A , sys.stderr , label="""stderr:""" ) ) ), ] , timeout=__A , ) return _RunOutput(await p.wait() , __A , __A ) def A__ ( __A , __A=None , __A=None , __A=180 , __A=False , __A=True ): '''simple docstring''' _lowerCamelCase : List[Any] = asyncio.get_event_loop() _lowerCamelCase : Any = loop.run_until_complete( _stream_subprocess(__A , env=__A , stdin=__A , timeout=__A , quiet=__A , echo=__A ) ) _lowerCamelCase : Optional[int] = """ """.join(__A ) if result.returncode > 0: _lowerCamelCase : Optional[Any] = """\n""".join(result.stderr ) raise RuntimeError( F"""'{cmd_str}' failed with returncode {result.returncode}\n\n""" F"""The combined stderr from workers follows:\n{stderr}""" ) return result class __snake_case ( __lowerCAmelCase ): '''simple docstring''' pass def A__ ( __A , __A=False ): '''simple docstring''' try: _lowerCamelCase : Optional[Any] = subprocess.check_output(__A , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(__A , """decode""" ): _lowerCamelCase : List[str] = output.decode("""utf-8""" ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( F"""Command `{' '.join(__A )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
15
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class __snake_case ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Tuple) ->int: """simple docstring""" _lowerCamelCase : Tuple = XLMRobertaModel.from_pretrained("""xlm-roberta-base""") _lowerCamelCase : Optional[int] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]]) # The dog is cute and lives in the garden house _lowerCamelCase : Optional[Any] = torch.Size((1, 12, 768)) # batch_size, sequence_length, embedding_vector_dim _lowerCamelCase : str = torch.tensor( [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]]) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): _lowerCamelCase : List[str] = model(_UpperCamelCase)["""last_hidden_state"""].detach() self.assertEqual(output.shape , _UpperCamelCase) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1E-3)) @slow def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : List[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""") _lowerCamelCase : Optional[Any] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]]) # The dog is cute and lives in the garden house _lowerCamelCase : str = torch.Size((1, 12, 1024)) # batch_size, sequence_length, embedding_vector_dim _lowerCamelCase : Union[str, Any] = torch.tensor( [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]]) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): _lowerCamelCase : int = model(_UpperCamelCase)["""last_hidden_state"""].detach() self.assertEqual(output.shape , _UpperCamelCase) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1E-3))
15
1
import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def A__ ( __A ): # picklable for multiprocessing '''simple docstring''' return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def A__ ( ): '''simple docstring''' with parallel_backend("""spark""" ): assert ParallelBackendConfig.backend_name == "spark" _lowerCamelCase : List[str] = [1, 2, 3] with pytest.raises(__A ): with parallel_backend("""unsupported backend""" ): map_nested(__A , __A , num_proc=2 ) with pytest.raises(__A ): with parallel_backend("""unsupported backend""" ): map_nested(__A , __A , num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize("""num_proc""" , [2, -1] ) def A__ ( __A ): '''simple docstring''' _lowerCamelCase : Optional[Any] = [1, 2] _lowerCamelCase : str = {"""a""": 1, """b""": 2} _lowerCamelCase : Dict = {"""a""": [1, 2], """b""": [3, 4]} _lowerCamelCase : Tuple = {"""a""": {"""1""": 1}, """b""": 2} _lowerCamelCase : Any = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4} _lowerCamelCase : Dict = [2, 3] _lowerCamelCase : str = {"""a""": 2, """b""": 3} _lowerCamelCase : Tuple = {"""a""": [2, 3], """b""": [4, 5]} _lowerCamelCase : Union[str, Any] = {"""a""": {"""1""": 2}, """b""": 3} _lowerCamelCase : List[Any] = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5} with parallel_backend("""spark""" ): assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa
15
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase : Tuple =logging.get_logger(__name__) class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = ['pixel_values'] def __init__( self : Optional[Any] , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Union[int, float] = 1 / 255 , _UpperCamelCase : bool = True , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , **_UpperCamelCase : str , ) ->None: """simple docstring""" super().__init__(**_UpperCamelCase) _lowerCamelCase : Tuple = size if size is not None else {"""height""": 256, """width""": 256} _lowerCamelCase : Optional[Any] = get_size_dict(_UpperCamelCase) _lowerCamelCase : Any = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} _lowerCamelCase : Any = get_size_dict(_UpperCamelCase , param_name="""crop_size""") _lowerCamelCase : int = do_resize _lowerCamelCase : int = size _lowerCamelCase : Optional[int] = resample _lowerCamelCase : int = do_center_crop _lowerCamelCase : Optional[Any] = crop_size _lowerCamelCase : Union[str, Any] = do_rescale _lowerCamelCase : List[str] = rescale_factor _lowerCamelCase : List[Any] = do_normalize _lowerCamelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray: """simple docstring""" _lowerCamelCase : Dict = get_size_dict(_UpperCamelCase) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""") return resize( _UpperCamelCase , size=(size["""height"""], size["""width"""]) , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : List[str] , ) ->np.ndarray: """simple docstring""" _lowerCamelCase : Union[str, Any] = get_size_dict(_UpperCamelCase) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. 
Got {size.keys()}""") return center_crop(_UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[int, float] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->str: """simple docstring""" return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray: """simple docstring""" return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : ImageInput , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Tuple=None , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : bool = None , _UpperCamelCase : float = None , _UpperCamelCase : bool = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , _UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCamelCase : List[Any] , ) ->PIL.Image.Image: """simple docstring""" _lowerCamelCase : Any = do_resize if do_resize is not None else self.do_resize _lowerCamelCase : List[str] = resample if resample is not None else self.resample _lowerCamelCase : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop _lowerCamelCase : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale _lowerCamelCase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCamelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize _lowerCamelCase : int = image_mean if image_mean is not None else self.image_mean _lowerCamelCase : Dict = image_std if image_std is not None else self.image_std _lowerCamelCase : Optional[Any] = size if size is not None else self.size _lowerCamelCase : Optional[int] = get_size_dict(_UpperCamelCase) _lowerCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size _lowerCamelCase : Dict = get_size_dict(_UpperCamelCase , param_name="""crop_size""") _lowerCamelCase : int = make_list_of_images(_UpperCamelCase) if not valid_images(_UpperCamelCase): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""") if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""") if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""") if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""") if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""") # All transformations expect numpy arrays. 
_lowerCamelCase : Union[str, Any] = [to_numpy_array(_UpperCamelCase) for image in images] if do_resize: _lowerCamelCase : Any = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase) for image in images] if do_center_crop: _lowerCamelCase : str = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase) for image in images] if do_rescale: _lowerCamelCase : Optional[int] = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase) for image in images] if do_normalize: _lowerCamelCase : List[str] = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase) for image in images] _lowerCamelCase : List[str] = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase) for image in images] _lowerCamelCase : int = {"""pixel_values""": images} return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase)
15
1
import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__) def A__ ( __A , __A ): '''simple docstring''' def run_func(__A ): @wraps(__A ) def run_in_eager_mode(*__A , **__A ): return func(*__A , **__A ) @wraps(__A ) @tf.function(experimental_compile=__A ) def run_in_graph_mode(*__A , **__A ): return func(*__A , **__A ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def A__ ( __A , __A , __A ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = random.Random() _lowerCamelCase : Any = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(__A , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = 42 _snake_case = 42 _snake_case = "TensorFlow" @property def _SCREAMING_SNAKE_CASE ( self : Any) ->List[str]: """simple docstring""" return tf.__version__ def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : int) ->float: """simple docstring""" _lowerCamelCase : List[str] = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""") _lowerCamelCase : Optional[Any] = self._prepare_inference_func(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) return self._measure_speed(_inference) def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : int) ->float: """simple docstring""" _lowerCamelCase : List[Any] = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""") _lowerCamelCase : str = self._prepare_train_func(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) return self._measure_speed(_train) def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : int) ->[Memory, Optional[MemorySummary]]: """simple docstring""" if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _UpperCamelCase) _lowerCamelCase : Optional[Any] = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""") _lowerCamelCase : List[Any] = self._prepare_inference_func(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) return self._measure_memory(_inference) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : int) ->[Memory, Optional[MemorySummary]]: """simple docstring""" if self.args.is_gpu: 
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _UpperCamelCase) _lowerCamelCase : Optional[int] = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""") _lowerCamelCase : Optional[int] = self._prepare_train_func(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) return self._measure_memory(_train) def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : int) ->Callable[[], None]: """simple docstring""" _lowerCamelCase : Tuple = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""") _lowerCamelCase : Dict = ( hasattr(_UpperCamelCase , """architectures""") and isinstance(config.architectures , _UpperCamelCase) and len(config.architectures) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _lowerCamelCase : Dict = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _lowerCamelCase : List[Any] = __import__("""transformers""" , fromlist=[model_class]) _lowerCamelCase : int = getattr(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : int = model_cls(_UpperCamelCase) except ImportError: raise ImportError( F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to""" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""") else: _lowerCamelCase : List[Any] = TF_MODEL_MAPPING[config.__class__](_UpperCamelCase) # encoder-decoder has vocab size saved differently _lowerCamelCase : int = config.vocab_size if hasattr(_UpperCamelCase , """vocab_size""") else config.encoder.vocab_size _lowerCamelCase : List[Any] = random_input_ids(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla) def encoder_decoder_forward(): return model(_UpperCamelCase , decoder_input_ids=_UpperCamelCase , training=_UpperCamelCase) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla) def encoder_forward(): return model(_UpperCamelCase , training=_UpperCamelCase) _lowerCamelCase : Union[str, Any] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : int) ->Callable[[], None]: """simple docstring""" _lowerCamelCase : Any = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""") if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""") _lowerCamelCase : List[str] = ( hasattr(_UpperCamelCase , """architectures""") and isinstance(config.architectures , _UpperCamelCase) and len(config.architectures) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _lowerCamelCase : Union[str, Any] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _lowerCamelCase : str = __import__("""transformers""" , fromlist=[model_class]) _lowerCamelCase : int = getattr(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Optional[int] = model_cls(_UpperCamelCase) except ImportError: raise ImportError( F"""{model_class} does not exist. 
If you just want to test the pretrained model, you might want to""" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""") else: _lowerCamelCase : List[str] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](_UpperCamelCase) # encoder-decoder has vocab size saved differently _lowerCamelCase : int = config.vocab_size if hasattr(_UpperCamelCase , """vocab_size""") else config.encoder.vocab_size _lowerCamelCase : Union[str, Any] = random_input_ids(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla) def encoder_decoder_train(): _lowerCamelCase : Dict = model(_UpperCamelCase , decoder_input_ids=_UpperCamelCase , labels=_UpperCamelCase , training=_UpperCamelCase)[0] _lowerCamelCase : int = tf.gradients(_UpperCamelCase , model.trainable_variables) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla) def encoder_train(): _lowerCamelCase : Dict = model(_UpperCamelCase , labels=_UpperCamelCase , training=_UpperCamelCase)[0] _lowerCamelCase : Any = tf.gradients(_UpperCamelCase , model.trainable_variables) return gradients _lowerCamelCase : List[str] = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : int) ->float: """simple docstring""" with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""") timeit.repeat(_UpperCamelCase , repeat=1 , number=5) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _lowerCamelCase : Tuple = timeit.repeat( _UpperCamelCase , repeat=self.args.repeat , number=10 , ) return min(_UpperCamelCase) / 1_0.0 except ResourceExhaustedError as e: self.print_fn(F"""Doesn't fit on GPU. {e}""") def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : Callable[[], None]) ->[Memory, MemorySummary]: """simple docstring""" logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""") with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""") _lowerCamelCase : List[str] = start_memory_tracing("""transformers""") if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""") elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""") _lowerCamelCase : Optional[Any] = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""") # init nvml nvml.nvmlInit() func() _lowerCamelCase : Optional[int] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx) _lowerCamelCase : Optional[Any] = nvml.nvmlDeviceGetMemoryInfo(_UpperCamelCase) _lowerCamelCase : int = meminfo.used _lowerCamelCase : List[Any] = Memory(_UpperCamelCase) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""") _lowerCamelCase : str = None else: _lowerCamelCase : Union[str, Any] = measure_peak_memory_cpu(_UpperCamelCase) _lowerCamelCase : List[Any] = Memory(_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else memory_bytes if self.args.trace_memory_line_by_line: _lowerCamelCase : Any = stop_memory_tracing(_UpperCamelCase) if memory is None: _lowerCamelCase : Union[str, Any] = summary.total else: _lowerCamelCase : List[str] = None return memory, summary except ResourceExhaustedError as e: self.print_fn(F"""Doesn't fit on GPU. {e}""") return "N/A", None
15
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
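# Illustrative conforming filter (invented here, not part of the original
# file): `process` is the only contract the plot helpers above rely on.
class OnePoleLowpass:
    """Toy first-order low-pass: y[n] = a * x[n] + (1 - a) * y[n - 1]."""

    def __init__(self, a: float = 0.1) -> None:
        self.a = a
        self.prev = 0.0

    def process(self, sample: float) -> float:
        self.prev = self.a * sample + (1 - self.a) * self.prev
        return self.prev


# Example: show_frequency_response(OnePoleLowpass(), samplerate=48_000)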
15
1
from __future__ import annotations import unittest from transformers import DebertaVaConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, TFDebertaVaModel, ) class __snake_case : '''simple docstring''' def __init__( self : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : Tuple=13 , _UpperCamelCase : List[Any]=7 , _UpperCamelCase : Dict=True , _UpperCamelCase : str=True , _UpperCamelCase : Any=True , _UpperCamelCase : Any=True , _UpperCamelCase : str=99 , _UpperCamelCase : Any=32 , _UpperCamelCase : Union[str, Any]=2 , _UpperCamelCase : str=4 , _UpperCamelCase : Tuple=37 , _UpperCamelCase : int="gelu" , _UpperCamelCase : Optional[Any]=0.1 , _UpperCamelCase : Any=0.1 , _UpperCamelCase : Tuple=512 , _UpperCamelCase : Union[str, Any]=16 , _UpperCamelCase : List[str]=2 , _UpperCamelCase : Dict=0.0_2 , _UpperCamelCase : Optional[Any]=False , _UpperCamelCase : List[str]=True , _UpperCamelCase : List[Any]="None" , _UpperCamelCase : int=3 , _UpperCamelCase : Any=4 , _UpperCamelCase : Tuple=None , ) ->Any: """simple docstring""" _lowerCamelCase : Any = parent _lowerCamelCase : Tuple = batch_size _lowerCamelCase : Tuple = seq_length _lowerCamelCase : Tuple = is_training _lowerCamelCase : Optional[Any] = use_input_mask _lowerCamelCase : Dict = use_token_type_ids _lowerCamelCase : Optional[int] = use_labels _lowerCamelCase : Optional[int] = vocab_size _lowerCamelCase : Optional[int] = hidden_size _lowerCamelCase : List[str] = num_hidden_layers _lowerCamelCase : Any = num_attention_heads _lowerCamelCase : int = intermediate_size _lowerCamelCase : List[Any] = hidden_act _lowerCamelCase : Any = hidden_dropout_prob _lowerCamelCase : Optional[Any] = attention_probs_dropout_prob _lowerCamelCase : Any = max_position_embeddings _lowerCamelCase : Optional[int] = type_vocab_size _lowerCamelCase : str = type_sequence_label_size _lowerCamelCase : List[Any] = initializer_range _lowerCamelCase : Tuple = num_labels _lowerCamelCase : Tuple = num_choices _lowerCamelCase : List[str] = relative_attention _lowerCamelCase : List[Any] = position_biased_input _lowerCamelCase : str = pos_att_type _lowerCamelCase : int = scope def _SCREAMING_SNAKE_CASE ( self : Dict) ->List[str]: """simple docstring""" _lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _lowerCamelCase : List[str] = None if self.use_input_mask: _lowerCamelCase : int = random_attention_mask([self.batch_size, self.seq_length]) _lowerCamelCase : List[Any] = None if self.use_token_type_ids: _lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _lowerCamelCase : List[Any] = None _lowerCamelCase : List[str] = None _lowerCamelCase : str = None if self.use_labels: _lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size) _lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) _lowerCamelCase : int = DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , 
intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_UpperCamelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : Any , _UpperCamelCase : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : Tuple) ->Tuple: """simple docstring""" _lowerCamelCase : Tuple = TFDebertaVaModel(config=_UpperCamelCase) _lowerCamelCase : int = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _lowerCamelCase : Dict = [input_ids, input_mask] _lowerCamelCase : Optional[int] = model(_UpperCamelCase) _lowerCamelCase : Dict = model(_UpperCamelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : List[str]) ->Dict: """simple docstring""" _lowerCamelCase : List[Any] = TFDebertaVaForMaskedLM(config=_UpperCamelCase) _lowerCamelCase : Union[str, Any] = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _lowerCamelCase : Any = model(_UpperCamelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Any , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any]) ->Tuple: """simple docstring""" _lowerCamelCase : Optional[int] = self.num_labels _lowerCamelCase : List[str] = TFDebertaVaForSequenceClassification(config=_UpperCamelCase) _lowerCamelCase : Optional[Any] = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _lowerCamelCase : Optional[int] = model(_UpperCamelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : str , _UpperCamelCase : Optional[Any] , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : int) ->Optional[Any]: """simple docstring""" _lowerCamelCase : str = self.num_labels _lowerCamelCase : Optional[int] = TFDebertaVaForTokenClassification(config=_UpperCamelCase) _lowerCamelCase : List[Any] = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _lowerCamelCase : Any = model(_UpperCamelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : List[str] , 
_UpperCamelCase : Optional[int] , _UpperCamelCase : Dict) ->int: """simple docstring""" _lowerCamelCase : str = TFDebertaVaForQuestionAnswering(config=_UpperCamelCase) _lowerCamelCase : List[str] = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _lowerCamelCase : int = model(_UpperCamelCase) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def _SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs() ( ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ) : Optional[int] = config_and_inputs _lowerCamelCase : List[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _snake_case = ( ( TFDebertaVaModel, TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, ) if is_tf_available() else () ) _snake_case = ( { 'feature-extraction': TFDebertaVaModel, 'fill-mask': TFDebertaVaForMaskedLM, 'question-answering': TFDebertaVaForQuestionAnswering, 'text-classification': TFDebertaVaForSequenceClassification, 'token-classification': TFDebertaVaForTokenClassification, 'zero-shot': TFDebertaVaForSequenceClassification, } if is_tf_available() else {} ) _snake_case = False _snake_case = False def _SCREAMING_SNAKE_CASE ( self : Dict) ->List[str]: """simple docstring""" _lowerCamelCase : Optional[Any] = TFDebertaVaModelTester(self) _lowerCamelCase : Dict = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37) def _SCREAMING_SNAKE_CASE ( self : int) ->str: """simple docstring""" self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int: """simple docstring""" _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any) ->int: """simple docstring""" _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Any: """simple docstring""" _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]: """simple docstring""" _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Any: """simple docstring""" _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase) @slow def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]: """simple docstring""" _lowerCamelCase : Any = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""") self.assertIsNotNone(_UpperCamelCase) @require_tf class __snake_case ( unittest.TestCase ): '''simple docstring''' @unittest.skip(reason="""Model not available yet""") def _SCREAMING_SNAKE_CASE ( self : 
str) ->Tuple: """simple docstring""" pass @slow def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->str: """simple docstring""" _lowerCamelCase : Union[str, Any] = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""") _lowerCamelCase : Optional[Any] = tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]]) _lowerCamelCase : Optional[int] = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) _lowerCamelCase : Optional[int] = model(_UpperCamelCase , attention_mask=_UpperCamelCase)[0] _lowerCamelCase : Dict = tf.constant( [[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]]) tf.debugging.assert_near(output[:, 1:4, 1:4] , _UpperCamelCase , atol=1E-4)
15
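# Illustrative sketch (not part of the test suite above): the slow integration test
# follows the standard pattern of loading a reference checkpoint, running a fixed
# input, and comparing a small slice of the output against pinned values. The
# checkpoint name and tolerance mirror the test; treat the rest as assumptions.
import tensorflow as tf
from transformers import TFDebertaV2Model

model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
attention_mask = tf.ones_like(input_ids)
hidden_states = model(input_ids, attention_mask=attention_mask)[0]
# verify only a small, numerically stable slice rather than the full tensor
print(hidden_states[:, 1:4, 1:4])  # compare against reference values with atol=1e-4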
import argparse
from pathlib import Path

import torch
from packaging import version
from torch.onnx import export

from diffusers import AutoencoderKL


is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
15
1
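# Illustrative sketch: once `convert_models` has run, the exported graph can be
# inspected and executed with onnxruntime. The output directory below is assumed;
# use whatever was passed as --output_path. If the traced `return_dict` flag was
# not constant-folded away during export, it must be passed as a second input.
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("sd-onnx/vae_decoder/model.onnx", providers=["CPUExecutionProvider"])
for inp in sess.get_inputs():
    print(inp.name, inp.shape)  # dynamic axes show up as symbolic dimension names
latent = np.random.randn(1, 4, 25, 25).astype(np.float32)  # 4 latent channels is the SD default
(sample,) = sess.run(None, {"latent_sample": latent})
print(sample.shape)  # decoded image tensor, (1, 3, 200, 200) for a 25x25 latent (8x upscale)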
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
15
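# Illustrative sketch: the per-stage lists above must stay the same length, since
# index i configures stage i of the three-stage CvT hierarchy. A wider variant is
# just a matter of overriding the stage lists together (values below are assumed).
from transformers import CvtConfig

config = CvtConfig(embed_dim=[96, 288, 576], num_heads=[1, 3, 6], depth=[1, 4, 16])
assert len(config.embed_dim) == len(config.num_heads) == len(config.depth) == 3
print(config.embed_dim[-1])  # channel width of the final stage: 576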
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError("Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError("Acceptor concentration should be greater than intrinsic concentration")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
15
1
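# Worked example calling the function above (doping values assumed, typical for a
# silicon p-n junction at room temperature, in cm^-3; the units cancel inside the
# log, so any consistent unit works):
#   V_bi = (k_B * T / q) * ln(N_d * N_a / n_i**2)
#        ~ 0.0259 V * ln(1e17 * 1e17 / (1.5e10)**2) ~ 0.81 V
print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10))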
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase : Tuple =logging.get_logger(__name__) class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = ['pixel_values'] def __init__( self : Optional[Any] , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Union[int, float] = 1 / 255 , _UpperCamelCase : bool = True , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , **_UpperCamelCase : str , ) ->None: """simple docstring""" super().__init__(**_UpperCamelCase) _lowerCamelCase : Tuple = size if size is not None else {"""height""": 256, """width""": 256} _lowerCamelCase : Optional[Any] = get_size_dict(_UpperCamelCase) _lowerCamelCase : Any = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} _lowerCamelCase : Any = get_size_dict(_UpperCamelCase , param_name="""crop_size""") _lowerCamelCase : int = do_resize _lowerCamelCase : int = size _lowerCamelCase : Optional[int] = resample _lowerCamelCase : int = do_center_crop _lowerCamelCase : Optional[Any] = crop_size _lowerCamelCase : Union[str, Any] = do_rescale _lowerCamelCase : List[str] = rescale_factor _lowerCamelCase : List[Any] = do_normalize _lowerCamelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray: """simple docstring""" _lowerCamelCase : Dict = get_size_dict(_UpperCamelCase) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""") return resize( _UpperCamelCase , size=(size["""height"""], size["""width"""]) , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : List[str] , ) ->np.ndarray: """simple docstring""" _lowerCamelCase : Union[str, Any] = get_size_dict(_UpperCamelCase) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. 
Got {size.keys()}""") return center_crop(_UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[int, float] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->str: """simple docstring""" return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray: """simple docstring""" return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : ImageInput , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Tuple=None , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : bool = None , _UpperCamelCase : float = None , _UpperCamelCase : bool = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , _UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCamelCase : List[Any] , ) ->PIL.Image.Image: """simple docstring""" _lowerCamelCase : Any = do_resize if do_resize is not None else self.do_resize _lowerCamelCase : List[str] = resample if resample is not None else self.resample _lowerCamelCase : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop _lowerCamelCase : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale _lowerCamelCase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCamelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize _lowerCamelCase : int = image_mean if image_mean is not None else self.image_mean _lowerCamelCase : Dict = image_std if image_std is not None else self.image_std _lowerCamelCase : Optional[Any] = size if size is not None else self.size _lowerCamelCase : Optional[int] = get_size_dict(_UpperCamelCase) _lowerCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size _lowerCamelCase : Dict = get_size_dict(_UpperCamelCase , param_name="""crop_size""") _lowerCamelCase : int = make_list_of_images(_UpperCamelCase) if not valid_images(_UpperCamelCase): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""") if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""") if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""") if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""") if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""") # All transformations expect numpy arrays. 
_lowerCamelCase : Union[str, Any] = [to_numpy_array(_UpperCamelCase) for image in images] if do_resize: _lowerCamelCase : Any = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase) for image in images] if do_center_crop: _lowerCamelCase : str = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase) for image in images] if do_rescale: _lowerCamelCase : Optional[int] = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase) for image in images] if do_normalize: _lowerCamelCase : List[str] = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase) for image in images] _lowerCamelCase : List[str] = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase) for image in images] _lowerCamelCase : int = {"""pixel_values""": images} return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase)
15
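# Illustrative sketch of the numeric core of the processor above: rescale to [0, 1],
# then normalize with the ImageNet-standard mean/std of 0.5 that it defaults to.
# The input is assumed to have already been resized and center-cropped to 224x224.
import numpy as np

image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32)
image = image * (1 / 255)                      # do_rescale
mean, std = np.float32(0.5), np.float32(0.5)   # IMAGENET_STANDARD_MEAN / IMAGENET_STANDARD_STD
image = (image - mean) / std                   # do_normalize -> values in [-1, 1]
image = image.transpose(2, 0, 1)               # ChannelDimension.FIRST
print(image.shape, image.min(), image.max())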
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
15
1
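# Illustrative sketch: the chars-per-token ratio computed in `tokenize` above is a
# quick proxy for how well a tokenizer compresses source code. Checkpoint name and
# sample string are assumptions for the example.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
content = "def add(a, b):\n    return a + b\n"
ids = tok(content, truncation=False)["input_ids"]
print(len(content) / len(ids))  # higher ratio = fewer tokens per character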
import os
import sys


SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)


from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
15
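# Illustrative sketch: with a hubconf like the one above in place, the wrappers are
# reachable through torch.hub without a PyPI install. The repo name and entry-point
# names below match the pattern above but are assumptions about the hosted repo.
import torch

tokenizer = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
model = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")
outputs = model(**tokenizer("Hello world", return_tensors="pt"))
print(outputs.last_hidden_state.shape)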
import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _snake_case = IFPipeline _snake_case = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'} _snake_case = TEXT_TO_IMAGE_BATCH_PARAMS _snake_case = PipelineTesterMixin.required_optional_params - {'latents'} def _SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]: """simple docstring""" return self._get_dummy_components() def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=0) ->Optional[Any]: """simple docstring""" if str(_UpperCamelCase).startswith("""mps"""): _lowerCamelCase : int = torch.manual_seed(_UpperCamelCase) else: _lowerCamelCase : List[Any] = torch.Generator(device=_UpperCamelCase).manual_seed(_UpperCamelCase) _lowerCamelCase : Dict = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""") def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1) def _SCREAMING_SNAKE_CASE ( self : int) ->Any: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2) def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]: """simple docstring""" self._test_save_load_local() def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3) @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]: """simple docstring""" _lowerCamelCase : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa) _lowerCamelCase : Tuple = IFSuperResolutionPipeline.from_pretrained( """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("""cuda""") _lowerCamelCase , _lowerCamelCase : str = 
pipe_a.encode_prompt("""anime turtle""" , device="""cuda""") del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() _lowerCamelCase : str = None _lowerCamelCase : str = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img _lowerCamelCase : Optional[Any] = IFImgaImgPipeline(**pipe_a.components) _lowerCamelCase : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if_imgaimg(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting _lowerCamelCase : Any = IFInpaintingPipeline(**pipe_a.components) _lowerCamelCase : Dict = IFInpaintingSuperResolutionPipeline(**pipe_a.components) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if_inpainting(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str) ->Tuple: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : Optional[int] = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Optional[Any] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Optional[int] = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 _lowerCamelCase : Dict = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : str = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : Any = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : str , _UpperCamelCase : List[Any]) ->Any: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Union[str, Any] = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Dict 
= pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Union[str, Any] = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _lowerCamelCase : List[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : List[str] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Optional[Any] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : List[Any] = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : str = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple) ->Optional[int]: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(1)).to(_UpperCamelCase) _lowerCamelCase : int = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Any = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Any = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _lowerCamelCase : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1)).to(_UpperCamelCase) _lowerCamelCase : List[str] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : Optional[Any] = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : Optional[Any] = 
torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def A__ ( ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
15
1
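# Illustrative sketch of the peak-memory bookkeeping the tests above rely on:
# reset the CUDA counters before the pipeline call, read the high-water mark after.
# The helper name and the lambda usage are assumptions, not part of the test file.
import torch

def measure_peak_memory(fn):
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
    result = fn()
    return result, torch.cuda.max_memory_allocated()

# usage: out, peak_bytes = measure_peak_memory(lambda: pipe(prompt_embeds=..., num_inference_steps=2))
#        assert peak_bytes < 13 * 10**9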
import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser lowerCAmelCase : Optional[int] =logging.getLogger(__name__) torch.set_grad_enabled(False) lowerCAmelCase : Dict ="cuda" if torch.cuda.is_available() else "cpu" def A__ ( __A , __A=100 , __A=" " ): '''simple docstring''' _lowerCamelCase : Any = text.split(__A ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__A ) , __A )] def A__ ( __A ): '''simple docstring''' _lowerCamelCase , _lowerCamelCase : Optional[Any] = [], [] for title, text in zip(documents["""title"""] , documents["""text"""] ): if text is not None: for passage in split_text(__A ): titles.append(title if title is not None else """""" ) texts.append(__A ) return {"title": titles, "text": texts} def A__ ( __A , __A , __A ): '''simple docstring''' _lowerCamelCase : Any = ctx_tokenizer( documents["""title"""] , documents["""text"""] , truncation=__A , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""] _lowerCamelCase : Optional[Any] = ctx_encoder(input_ids.to(device=__A ) , return_dict=__A ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def A__ ( __A , __A , __A , ): '''simple docstring''' ###################################### logger.info("""Step 1 - Create the dataset""" ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way _lowerCamelCase : str = load_dataset( """csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words _lowerCamelCase : Dict = dataset.map(__A , batched=__A , num_proc=processing_args.num_proc ) # And compute the embeddings _lowerCamelCase : List[Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__A ) _lowerCamelCase : List[str] = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) _lowerCamelCase : List[str] = Features( {"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space _lowerCamelCase : Dict = dataset.map( partial(__A , ctx_encoder=__A , ctx_tokenizer=__A ) , batched=__A , batch_size=processing_args.batch_size , features=__A , ) # And finally save your dataset _lowerCamelCase : Optional[int] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" ) dataset.save_to_disk(__A ) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info("""Step 2 - Index the 
dataset""" ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search _lowerCamelCase : Optional[int] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index("""embeddings""" , custom_index=__A ) # And save the index _lowerCamelCase : Dict = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" ) dataset.get_index("""embeddings""" ).save(__A ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class __snake_case : '''simple docstring''' _snake_case = field( default=str(Path(__lowerCAmelCase ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , ) _snake_case = field( default=__lowerCAmelCase , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , ) _snake_case = field( default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , ) _snake_case = field( default='facebook/dpr-ctx_encoder-multiset-base' , metadata={ 'help': ( 'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or' ' \'facebook/dpr-ctx_encoder-multiset-base\'' ) } , ) _snake_case = field( default=str(Path(__lowerCAmelCase ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , ) @dataclass class __snake_case : '''simple docstring''' _snake_case = field( default=__lowerCAmelCase , metadata={ 'help': 'The number of processes to use to split the documents into passages. Default is single process.' } , ) _snake_case = field( default=16 , metadata={ 'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.' } , ) @dataclass class __snake_case : '''simple docstring''' _snake_case = field( default=768 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , ) _snake_case = field( default=128 , metadata={ 'help': ( 'The number of bi-directional links created for every new element during the HNSW index construction.' ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) lowerCAmelCase : List[Any] =HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict =parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: lowerCAmelCase : List[str] =rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
15
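# Illustrative sketch of the indexing step in isolation: build the same HNSW index
# over random unit vectors and query it. d=768 and m=128 match the defaults above;
# the corpus size and the unit-normalization (inner product ~ cosine) are assumed.
import numpy as np
import faiss

d, m = 768, 128
xb = np.random.randn(1000, d).astype(np.float32)
xb /= np.linalg.norm(xb, axis=1, keepdims=True)
index = faiss.IndexHNSWFlat(d, m, faiss.METRIC_INNER_PRODUCT)
index.add(xb)
scores, ids = index.search(xb[:1], 5)  # nearest neighbors of the first passage
print(ids[0], scores[0])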
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
15
1
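# Worked example of the hidden_size bookkeeping above: each Swin stage doubles the
# channel dimension, so the final width is embed_dim * 2**(num_stages - 1).
embed_dim, depths = 96, [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
print(hidden_size)  # 96 * 8 = 768 for the tiny/small variants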
import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase : List[Any] =get_tests_dir("fixtures/test_sentencepiece_no_bos.model") @require_sentencepiece @require_tokenizers class __snake_case ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _snake_case = PegasusTokenizer _snake_case = PegasusTokenizerFast _snake_case = True _snake_case = True def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing _lowerCamelCase : List[Any] = PegasusTokenizer(_UpperCamelCase) tokenizer.save_pretrained(self.tmpdirname) @cached_property def _SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]: """simple docstring""" return PegasusTokenizer.from_pretrained("""google/pegasus-large""") def _SCREAMING_SNAKE_CASE ( self : Tuple , **_UpperCamelCase : Tuple) ->PegasusTokenizer: """simple docstring""" return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : List[Any]) ->Dict: """simple docstring""" return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Any: """simple docstring""" _lowerCamelCase : Optional[int] = """</s>""" _lowerCamelCase : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase) , _UpperCamelCase) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase) , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Tuple) ->int: """simple docstring""" _lowerCamelCase : Dict = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , """<pad>""") self.assertEqual(vocab_keys[1] , """</s>""") self.assertEqual(vocab_keys[-1] , """v""") self.assertEqual(len(_UpperCamelCase) , 1103) def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1103) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: """simple docstring""" _lowerCamelCase : str = self.rust_tokenizer_class.from_pretrained(self.tmpdirname) _lowerCamelCase : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname) _lowerCamelCase : Optional[int] = ( """Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important""" """ </s> <pad> <pad> <pad>""" ) _lowerCamelCase : List[Any] = rust_tokenizer([raw_input_str] , return_tensors=_UpperCamelCase , add_special_tokens=_UpperCamelCase).input_ids[0] _lowerCamelCase : List[Any] = py_tokenizer([raw_input_str] , return_tensors=_UpperCamelCase , add_special_tokens=_UpperCamelCase).input_ids[0] self.assertListEqual(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]: """simple docstring""" _lowerCamelCase : List[Any] = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word _lowerCamelCase : str = """<mask_1> To ensure a <mask_2> flow of bank resolutions.""" _lowerCamelCase : Optional[int] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1] _lowerCamelCase : List[Any] = tokenizer([raw_input_str] , return_tensors=_UpperCamelCase).input_ids[0] self.assertListEqual(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Any: """simple 
docstring""" _lowerCamelCase : int = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 _lowerCamelCase : Any = """To ensure a smooth flow of bank resolutions.""" _lowerCamelCase : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1] _lowerCamelCase : Any = tokenizer([raw_input_str] , return_tensors=_UpperCamelCase).input_ids[0] self.assertListEqual(_UpperCamelCase , _UpperCamelCase) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def _SCREAMING_SNAKE_CASE ( self : Dict) ->int: """simple docstring""" _lowerCamelCase : Optional[int] = ["""This is going to be way too long.""" * 150, """short example"""] _lowerCamelCase : Dict = ["""not super long but more than 5 tokens""", """tiny"""] _lowerCamelCase : List[Any] = self._large_tokenizer(_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors="""pt""") _lowerCamelCase : int = self._large_tokenizer( text_target=_UpperCamelCase , max_length=5 , padding=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors="""pt""") assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(_UpperCamelCase) == 2 # input_ids, attention_mask. @slow def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" _lowerCamelCase : List[str] = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_UpperCamelCase , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , ) @require_sentencepiece @require_tokenizers class __snake_case ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _snake_case = PegasusTokenizer _snake_case = PegasusTokenizerFast _snake_case = True _snake_case = True def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing _lowerCamelCase : Union[str, Any] = PegasusTokenizer(_UpperCamelCase , offset=0 , mask_token_sent=_UpperCamelCase , mask_token="""[MASK]""") tokenizer.save_pretrained(self.tmpdirname) @cached_property def _SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]: """simple docstring""" return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""") def _SCREAMING_SNAKE_CASE ( self : List[str] , **_UpperCamelCase : List[Any]) ->PegasusTokenizer: """simple docstring""" return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : Dict) ->List[Any]: """simple docstring""" return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict: """simple docstring""" _lowerCamelCase : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname) _lowerCamelCase : List[str] = self.tokenizer_class.from_pretrained(self.tmpdirname) _lowerCamelCase : int = ( """Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>""" """ <pad> <pad> <pad>""" ) _lowerCamelCase : Union[str, Any] = rust_tokenizer([raw_input_str] , return_tensors=_UpperCamelCase , add_special_tokens=_UpperCamelCase).input_ids[0] _lowerCamelCase : str = py_tokenizer([raw_input_str] , return_tensors=_UpperCamelCase , add_special_tokens=_UpperCamelCase).input_ids[0] self.assertListEqual(_UpperCamelCase , _UpperCamelCase) @require_torch def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->int: """simple docstring""" _lowerCamelCase : List[Any] = ["""This is going to be way too long.""" * 1000, """short example"""] _lowerCamelCase : Tuple = ["""not super long but more than 5 tokens""", """tiny"""] _lowerCamelCase : Optional[int] = self._large_tokenizer(_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors="""pt""") _lowerCamelCase : List[str] = self._large_tokenizer( text_target=_UpperCamelCase , max_length=5 , padding=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors="""pt""") assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(_UpperCamelCase) == 2 # input_ids, attention_mask. def _SCREAMING_SNAKE_CASE ( self : Tuple) ->str: """simple docstring""" _lowerCamelCase : Tuple = ( """This is an example string that is used to test the original TF implementation against the HF""" """ implementation""" ) _lowerCamelCase : int = self._large_tokenizer(_UpperCamelCase).input_ids self.assertListEqual( _UpperCamelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
15
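# Illustrative sketch of the offset logic the tests above pin down: Pegasus reserves
# low ids for pad/eos/mask special tokens and shifts every SentencePiece id up by
# `offset` (103 for google/pegasus-large). Checkpoint download is assumed.
from transformers import PegasusTokenizer

tok = PegasusTokenizer.from_pretrained("google/pegasus-large")
print(tok.pad_token_id, tok.eos_token_id, tok.offset)   # 0 1 103
print(tok.convert_ids_to_tokens([0, 1, 2, 3]))          # ['<pad>', '</s>', '<mask_1>', '<mask_2>']
print(tok("To ensure a smooth flow of bank resolutions.").input_ids[-1])  # sequences end in eos id 1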
import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = (EulerDiscreteScheduler,) _snake_case = 10 def _SCREAMING_SNAKE_CASE ( self : Tuple , **_UpperCamelCase : Optional[Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Optional[int] = { """num_train_timesteps""": 1100, """beta_start""": 0.0_0_0_1, """beta_end""": 0.0_2, """beta_schedule""": """linear""", } config.update(**_UpperCamelCase) return config def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict: """simple docstring""" for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]): self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : List[Any] = self.scheduler_classes[0] _lowerCamelCase : str = self.get_scheduler_config() _lowerCamelCase : Any = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps) _lowerCamelCase : str = torch.manual_seed(0) _lowerCamelCase : str = self.dummy_model() _lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma _lowerCamelCase : int = sample.to(_UpperCamelCase) for i, t in enumerate(scheduler.timesteps): _lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : str = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : Dict = output.prev_sample _lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : Any = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2 assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any: """simple docstring""" _lowerCamelCase : int = self.scheduler_classes[0] _lowerCamelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""") _lowerCamelCase : int = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps) _lowerCamelCase : Any = torch.manual_seed(0) _lowerCamelCase : int = self.dummy_model() _lowerCamelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma _lowerCamelCase : Dict = sample.to(_UpperCamelCase) for i, t in enumerate(scheduler.timesteps): _lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : str = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[Any] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : Tuple = output.prev_sample _lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase 
: Optional[int] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 0.0_0_0_2) < 1E-2 assert abs(result_mean.item() - 2.2_6_7_6E-0_6) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.scheduler_classes[0] _lowerCamelCase : int = self.get_scheduler_config() _lowerCamelCase : List[Any] = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase) _lowerCamelCase : Optional[Any] = torch.manual_seed(0) _lowerCamelCase : Tuple = self.dummy_model() _lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _lowerCamelCase : Tuple = sample.to(_UpperCamelCase) for t in scheduler.timesteps: _lowerCamelCase : List[Any] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Any = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : List[Any] = output.prev_sample _lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : List[Any] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2 assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple: """simple docstring""" _lowerCamelCase : List[str] = self.scheduler_classes[0] _lowerCamelCase : Optional[int] = self.get_scheduler_config() _lowerCamelCase : int = scheduler_class(**_UpperCamelCase , use_karras_sigmas=_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase) _lowerCamelCase : int = torch.manual_seed(0) _lowerCamelCase : Tuple = self.dummy_model() _lowerCamelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _lowerCamelCase : Optional[int] = sample.to(_UpperCamelCase) for t in scheduler.timesteps: _lowerCamelCase : Tuple = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Any = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : int = output.prev_sample _lowerCamelCase : Tuple = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : List[str] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9) < 1E-2 assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3) < 1E-3
15
1
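# A minimal, hedged sketch of the denoising loop the scheduler tests above
# exercise, using only the public EulerDiscreteScheduler API; the zero-filled
# "model output" below is a stand-in assumption, not the tests' dummy_model().
import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)

generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8, generator=generator) * scheduler.init_noise_sigma

for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)  # Euler pre-scaling
    model_output = torch.zeros_like(model_input)          # dummy noise prediction
    sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample

print(sample.abs().sum().item(), sample.abs().mean().item())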
from __future__ import annotations def A__ ( __A , __A , __A ): '''simple docstring''' if days_between_payments <= 0: raise ValueError("""days_between_payments must be > 0""" ) if daily_interest_rate < 0: raise ValueError("""daily_interest_rate must be >= 0""" ) if principal <= 0: raise ValueError("""principal must be > 0""" ) return principal * daily_interest_rate * days_between_payments def A__ ( __A , __A , __A , ): '''simple docstring''' if number_of_compounding_periods <= 0: raise ValueError("""number_of_compounding_periods must be > 0""" ) if nominal_annual_interest_rate_percentage < 0: raise ValueError("""nominal_annual_interest_rate_percentage must be >= 0""" ) if principal <= 0: raise ValueError("""principal must be > 0""" ) return principal * ( (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1 ) def A__ ( __A , __A , __A , ): '''simple docstring''' if number_of_years <= 0: raise ValueError("""number_of_years must be > 0""" ) if nominal_annual_percentage_rate < 0: raise ValueError("""nominal_annual_percentage_rate must be >= 0""" ) if principal <= 0: raise ValueError("""principal must be > 0""" ) return compound_interest( __A , nominal_annual_percentage_rate / 365 , number_of_years * 365 ) if __name__ == "__main__": import doctest doctest.testmod()
15
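# A standalone sketch of the three interest formulas defined above; the
# readable function names are assumptions, since the record renames every
# definition to A__.
def simple_interest(principal: float, daily_rate: float, days: int) -> float:
    return principal * daily_rate * days

def compound_interest(principal: float, rate_per_period: float, periods: int) -> float:
    return principal * ((1 + rate_per_period) ** periods - 1)

def apr_interest(principal: float, apr: float, years: int) -> float:
    # APR is compounded daily: apr / 365 per period over years * 365 periods.
    return compound_interest(principal, apr / 365, years * 365)

print(simple_interest(1000, 0.0005, 30))            # 15.0
print(round(compound_interest(1000, 0.01, 12), 2))
print(round(apr_interest(1000, 0.05, 1), 2))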
import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase : Dict =logging.get_logger(__name__) lowerCAmelCase : Dict ={"vocab_file": "vocab.json"} lowerCAmelCase : List[str] ={ "vocab_file": { "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json", } } lowerCAmelCase : int ={"mgp-str": 27} class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = VOCAB_FILES_NAMES _snake_case = PRETRAINED_VOCAB_FILES_MAP _snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : int="[GO]" , _UpperCamelCase : Any="[GO]" , _UpperCamelCase : Optional[Any]="[s]" , _UpperCamelCase : List[str]="[GO]" , **_UpperCamelCase : Dict) ->Union[str, Any]: """simple docstring""" super().__init__( unk_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , pad_token=_UpperCamelCase , **_UpperCamelCase , ) with open(_UpperCamelCase , encoding="""utf-8""") as vocab_handle: _lowerCamelCase : Optional[Any] = json.load(_UpperCamelCase) _lowerCamelCase : Optional[Any] = {v: k for k, v in self.vocab.items()} @property def _SCREAMING_SNAKE_CASE ( self : str) ->Any: """simple docstring""" return len(self.vocab) def _SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]: """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Union[str, Any]) ->Any: """simple docstring""" _lowerCamelCase : Tuple = [] for s in text: char_tokens.extend(_UpperCamelCase) return char_tokens def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : int) ->Optional[int]: """simple docstring""" return self.vocab.get(_UpperCamelCase , self.vocab.get(self.unk_token)) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[Any]) ->Dict: """simple docstring""" return self.decoder.get(_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None) ->Tuple[str]: """simple docstring""" if not os.path.isdir(_UpperCamelCase): logger.error("""Vocabulary path ({}) should be a directory""".format(_UpperCamelCase)) return _lowerCamelCase : Tuple = os.path.join( _UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]) with open(_UpperCamelCase , """w""" , encoding="""utf-8""") as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=_UpperCamelCase , ensure_ascii=_UpperCamelCase) + """\n""") return (vocab_file,)
15
1
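# A small sketch of the character-level round trip the tokenizer above
# performs; the toy vocab is an assumption (MgpstrTokenizer needs a real
# vocab.json), and only the tokenize / id-mapping logic is mirrored.
vocab = {"[GO]": 0, "h": 1, "e": 2, "l": 3, "o": 4}
decoder = {v: k for k, v in vocab.items()}

text = "hello"
char_tokens = list(text)                                    # one token per character
ids = [vocab.get(ch, vocab["[GO]"]) for ch in char_tokens]  # unknown chars fall back to [GO]
print(ids)                               # [1, 2, 3, 3, 4]
print("".join(decoder[i] for i in ids))  # hello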
from math import log from scipy.constants import Boltzmann, physical_constants lowerCAmelCase : List[Any] =300 # TEMPERATURE (unit = K) def A__ ( __A , __A , __A , ): '''simple docstring''' if donor_conc <= 0: raise ValueError("""Donor concentration should be positive""" ) elif acceptor_conc <= 0: raise ValueError("""Acceptor concentration should be positive""" ) elif intrinsic_conc <= 0: raise ValueError("""Intrinsic concentration should be positive""" ) elif donor_conc <= intrinsic_conc: raise ValueError( """Donor concentration should be greater than intrinsic concentration""" ) elif acceptor_conc <= intrinsic_conc: raise ValueError( """Acceptor concentration should be greater than intrinsic concentration""" ) else: return ( Boltzmann * T * log((donor_conc * acceptor_conc) / intrinsic_conc**2 ) / physical_constants["electron volt"][0] ) if __name__ == "__main__": import doctest doctest.testmod()
15
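# A worked call of the built-in-potential formula implemented above,
# V_bi = (k_B * T / q) * ln(N_d * N_a / n_i**2), with silicon-like numbers;
# the concentrations (in cm^-3) are illustrative assumptions.
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # K
donor_conc, acceptor_conc, intrinsic_conc = 1e17, 1e16, 1.5e10
v_bi = (
    Boltzmann * T * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
    / physical_constants["electron volt"][0]
)
print(f"{v_bi:.3f} V")  # roughly 0.75 V for these concentrations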
import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Tuple = ["""a""", """b""", """c"""] # Defaults to last layer if both are None _lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) self.assertEqual(_UpperCamelCase , ["""c"""]) self.assertEqual(_UpperCamelCase , [2]) # Out indices set to match out features _lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(["""a""", """c"""] , _UpperCamelCase , _UpperCamelCase) self.assertEqual(_UpperCamelCase , ["""a""", """c"""]) self.assertEqual(_UpperCamelCase , [0, 2]) # Out features set to match out indices _lowerCamelCase , _lowerCamelCase : Tuple = get_aligned_output_features_output_indices(_UpperCamelCase , [0, 2] , _UpperCamelCase) self.assertEqual(_UpperCamelCase , ["""a""", """c"""]) self.assertEqual(_UpperCamelCase , [0, 2]) # Out features selected from negative indices _lowerCamelCase , _lowerCamelCase : str = get_aligned_output_features_output_indices(_UpperCamelCase , [-3, -1] , _UpperCamelCase) self.assertEqual(_UpperCamelCase , ["""a""", """c"""]) self.assertEqual(_UpperCamelCase , [-3, -1]) def _SCREAMING_SNAKE_CASE ( self : int) ->int: """simple docstring""" with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , _UpperCamelCase) # Out features must be a list with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""]) # Out features must be a subset of stage names with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""]) # Out indices must be a list or tuple with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(_UpperCamelCase , 0 , ["""a""", """b"""]) # Out indices must be a subset of stage names with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(_UpperCamelCase , (0, 1) , ["""a"""]) # Out features and out indices must be the same length with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""]) # Out features should match out indices with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""]) # Out features and out indices should be in order with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""]) # Check passes with valid inputs verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""]) def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]: """simple docstring""" _lowerCamelCase : int = BackboneMixin() _lowerCamelCase : Union[str, Any] = ["""a""", """b""", """c"""] _lowerCamelCase : Tuple = ["""a""", """c"""] _lowerCamelCase : List[Any] = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ["""a""", """c"""]) self.assertEqual(backbone.out_indices , [0, 2]) # Check out features and indices are updated correctly _lowerCamelCase : str = ["""a""", """b"""] self.assertEqual(backbone.out_features , ["""a""", """b"""]) 
self.assertEqual(backbone.out_indices , [0, 1]) _lowerCamelCase : Optional[int] = [-3, -1] self.assertEqual(backbone.out_features , ["""a""", """c"""]) self.assertEqual(backbone.out_indices , [-3, -1])
15
1
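# A direct call of the alignment helper the tests above exercise; this is the
# real transformers utility, and per those tests it defaults to the last stage
# when both out_features and out_indices are None (behavior may vary by version).
from transformers.utils.backbone_utils import get_aligned_output_features_output_indices

stage_names = ["a", "b", "c"]
print(get_aligned_output_features_output_indices(None, None, stage_names))        # (['c'], [2])
print(get_aligned_output_features_output_indices(["a", "c"], None, stage_names))  # (['a', 'c'], [0, 2])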
from __future__ import annotations class __snake_case : '''simple docstring''' def __init__( self : Tuple , _UpperCamelCase : int = 0) ->str: """simple docstring""" _lowerCamelCase : Union[str, Any] = key def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]: """simple docstring""" assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Union[str, Any] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(_UpperCamelCase) ^ key) for ch in content] def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]: """simple docstring""" assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Optional[int] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(_UpperCamelCase) ^ key) for ch in content] def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str: """simple docstring""" assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : int = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned _lowerCamelCase : Any = """""" for ch in content: ans += chr(ord(_UpperCamelCase) ^ key) return ans def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str: """simple docstring""" assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : int = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned _lowerCamelCase : Optional[Any] = """""" for ch in content: ans += chr(ord(_UpperCamelCase) ^ key) return ans def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->bool: """simple docstring""" assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase) try: with open(_UpperCamelCase) as fin, open("""encrypt.out""" , """w+""") as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(_UpperCamelCase , _UpperCamelCase)) except OSError: return False return True def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : int) ->bool: """simple docstring""" assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase) try: with open(_UpperCamelCase) as fin, open("""decrypt.out""" , """w+""") as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(_UpperCamelCase , _UpperCamelCase)) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
15
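# A self-contained round trip of the XOR scheme the cipher class above
# implements; XOR with the same key is its own inverse, which is why the
# class's encrypt and decrypt bodies are identical.
def xor_string(content: str, key: int = 1) -> str:
    key %= 255  # keep the key in byte range, as the class does
    return "".join(chr(ord(ch) ^ key) for ch in content)

secret = xor_string("hallo welt", key=67)
print(secret)
print(xor_string(secret, key=67))  # hallo welt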
import math def A__ ( __A ): '''simple docstring''' assert isinstance(__A , __A ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or not number % 2: # Negatives, 0, 1 and all even numbers are not primes return False _lowerCamelCase : List[Any] = range(3 , int(math.sqrt(__A ) + 1 ) , 2 ) return not any(not number % i for i in odd_numbers ) def A__ ( __A , __A=1 , **__A ): '''simple docstring''' _lowerCamelCase : Dict = factor * value _lowerCamelCase : str = value while not is_prime(__A ): value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1 if value == first_value_val: return next_prime(value + 1 , **__A ) return value
15
1
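# A quick exercise of the trial-division primality test and the forward prime
# search defined above; the readable names are assumptions, since the record
# renames both functions to A__.
import math

def is_prime(number: int) -> bool:
    if 1 < number < 4:  # 2 and 3 are prime
        return True
    if number < 2 or number % 2 == 0:
        return False
    return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))

def next_prime(value: int) -> int:
    while not is_prime(value):
        value += 1
    return value

print([n for n in range(20) if is_prime(n)])  # [2, 3, 5, 7, 11, 13, 17, 19]
print(next_prime(90))                         # 97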
import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase : Dict =logging.get_logger(__name__) def A__ ( __A ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: _lowerCamelCase : Dict = 128 elif "12-12" in model_name: _lowerCamelCase : Optional[int] = 12 _lowerCamelCase : Optional[int] = 12 elif "14-14" in model_name: _lowerCamelCase : Any = 14 _lowerCamelCase : Dict = 14 elif "16-16" in model_name: _lowerCamelCase : List[str] = 16 _lowerCamelCase : List[Any] = 16 else: raise ValueError("""Model not supported""" ) _lowerCamelCase : Dict = """huggingface/label-files""" if "speech-commands" in model_name: _lowerCamelCase : str = 35 _lowerCamelCase : Union[str, Any] = """speech-commands-v2-id2label.json""" else: _lowerCamelCase : List[Any] = 527 _lowerCamelCase : Union[str, Any] = """audioset-id2label.json""" _lowerCamelCase : Optional[Any] = json.load(open(hf_hub_download(__A , __A , repo_type="""dataset""" ) , """r""" ) ) _lowerCamelCase : int = {int(__A ): v for k, v in idalabel.items()} _lowerCamelCase : Any = idalabel _lowerCamelCase : List[str] = {v: k for k, v in idalabel.items()} return config def A__ ( __A ): '''simple docstring''' if "module.v" in name: _lowerCamelCase : Dict = name.replace("""module.v""" , """audio_spectrogram_transformer""" ) if "cls_token" in name: _lowerCamelCase : List[str] = name.replace("""cls_token""" , """embeddings.cls_token""" ) if "dist_token" in name: _lowerCamelCase : int = name.replace("""dist_token""" , """embeddings.distillation_token""" ) if "pos_embed" in name: _lowerCamelCase : str = name.replace("""pos_embed""" , """embeddings.position_embeddings""" ) if "patch_embed.proj" in name: _lowerCamelCase : int = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) # transformer blocks if "blocks" in name: _lowerCamelCase : Any = name.replace("""blocks""" , """encoder.layer""" ) if "attn.proj" in name: _lowerCamelCase : Union[str, Any] = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: _lowerCamelCase : List[str] = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: _lowerCamelCase : Optional[Any] = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: _lowerCamelCase : Any = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: _lowerCamelCase : int = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: _lowerCamelCase : List[Any] = name.replace("""mlp.fc2""" , """output.dense""" ) # final layernorm if "audio_spectrogram_transformer.norm" in name: _lowerCamelCase : str = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" ) # classifier head if "module.mlp_head.0" in name: _lowerCamelCase : List[str] = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" ) if "module.mlp_head.1" in name: _lowerCamelCase : Optional[Any] = name.replace("""module.mlp_head.1""" , """classifier.dense""" ) return name def A__ ( __A , __A ): '''simple docstring''' for key in orig_state_dict.copy().keys(): _lowerCamelCase : Dict = orig_state_dict.pop(__A ) if "qkv" in key: _lowerCamelCase : Tuple = key.split(""".""" ) 
_lowerCamelCase : int = int(key_split[3] ) _lowerCamelCase : Any = config.hidden_size if "weight" in key: _lowerCamelCase : Union[str, Any] = val[:dim, :] _lowerCamelCase : Union[str, Any] = val[dim : dim * 2, :] _lowerCamelCase : List[Any] = val[-dim:, :] else: _lowerCamelCase : Any = val[:dim] _lowerCamelCase : List[Any] = val[dim : dim * 2] _lowerCamelCase : Optional[Any] = val[-dim:] else: _lowerCamelCase : Dict = val return orig_state_dict def A__ ( __A ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = [ """module.v.head.weight""", """module.v.head.bias""", """module.v.head_dist.weight""", """module.v.head_dist.bias""", ] for k in ignore_keys: state_dict.pop(__A , __A ) @torch.no_grad() def A__ ( __A , __A , __A=False ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = get_audio_spectrogram_transformer_config(__A ) _lowerCamelCase : List[str] = { """ast-finetuned-audioset-10-10-0.4593""": ( """https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.450""": ( """https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.448""": ( """https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.448-v2""": ( """https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1""" ), """ast-finetuned-audioset-12-12-0.447""": ( """https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1""" ), """ast-finetuned-audioset-14-14-0.443""": ( """https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1""" ), """ast-finetuned-audioset-16-16-0.442""": ( """https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1""" ), """ast-finetuned-speech-commands-v2""": ( """https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1""" ), } # load original state_dict _lowerCamelCase : List[Any] = model_name_to_url[model_name] _lowerCamelCase : Union[str, Any] = torch.hub.load_state_dict_from_url(__A , map_location="""cpu""" ) # remove some keys remove_keys(__A ) # rename some keys _lowerCamelCase : str = convert_state_dict(__A , __A ) # load 🤗 model _lowerCamelCase : Tuple = ASTForAudioClassification(__A ) model.eval() model.load_state_dict(__A ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 _lowerCamelCase : Optional[int] = -4.2_677_393 if """speech-commands""" not in model_name else -6.845_978 _lowerCamelCase : Dict = 4.5_689_974 if """speech-commands""" not in model_name else 5.5_654_526 _lowerCamelCase : int = 1_024 if """speech-commands""" not in model_name else 128 _lowerCamelCase : List[Any] = ASTFeatureExtractor(mean=__A , std=__A , max_length=__A ) if "speech-commands" in model_name: _lowerCamelCase : str = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" ) _lowerCamelCase : List[str] = dataset[0]["""audio"""]["""array"""] else: _lowerCamelCase : Tuple = hf_hub_download( repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , ) _lowerCamelCase , _lowerCamelCase : Optional[Any] = torchaudio.load(__A ) _lowerCamelCase : int = waveform.squeeze().numpy() _lowerCamelCase : Optional[Any] = feature_extractor(__A , sampling_rate=16_000 , return_tensors="""pt""" ) # forward pass _lowerCamelCase : Optional[int] = model(**__A ) _lowerCamelCase : Union[str, Any] = outputs.logits if 
model_name == "ast-finetuned-audioset-10-10-0.4593": _lowerCamelCase : Optional[Any] = torch.tensor([-0.8_760, -7.0_042, -8.6_602] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": _lowerCamelCase : Any = torch.tensor([-1.1_986, -7.0_903, -8.2_718] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": _lowerCamelCase : Tuple = torch.tensor([-2.6_128, -8.0_080, -9.4_344] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": _lowerCamelCase : Any = torch.tensor([-1.5_080, -7.4_534, -8.8_917] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": _lowerCamelCase : Optional[Any] = torch.tensor([-0.5_050, -6.5_833, -8.0_843] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": _lowerCamelCase : Any = torch.tensor([-0.3_826, -7.0_336, -8.2_413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": _lowerCamelCase : int = torch.tensor([-1.2_113, -6.9_101, -8.3_470] ) elif model_name == "ast-finetuned-speech-commands-v2": _lowerCamelCase : Optional[Any] = torch.tensor([6.1_589, -8.0_566, -8.7_984] ) else: raise ValueError("""Unknown model name""" ) if not torch.allclose(logits[0, :3] , __A , atol=1E-4 ): raise ValueError("""Logits don't match""" ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: Path(__A ).mkdir(exist_ok=__A ) print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__A ) print(F"""Saving feature extractor to {pytorch_dump_folder_path}""" ) feature_extractor.save_pretrained(__A ) if push_to_hub: print("""Pushing model and feature extractor to the hub...""" ) model.push_to_hub(F"""MIT/{model_name}""" ) feature_extractor.push_to_hub(F"""MIT/{model_name}""" ) if __name__ == "__main__": lowerCAmelCase : int =argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="ast-finetuned-audioset-10-10-0.4593", type=str, help="Name of the Audio Spectrogram Transformer model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) lowerCAmelCase : Tuple =parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
15
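# The core move in convert_state_dict above, isolated: a fused qkv projection
# of shape (3 * dim, dim) is sliced into separate query / key / value weights.
# A minimal sketch with a toy dim:
import torch

dim = 4
qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)

query_w = qkv_weight[:dim, :]         # first dim rows
key_w = qkv_weight[dim : dim * 2, :]  # middle dim rows
value_w = qkv_weight[-dim:, :]        # last dim rows
assert query_w.shape == key_w.shape == value_w.shape == (dim, dim)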
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING lowerCAmelCase : Optional[Any] =logging.get_logger(__name__) @add_end_docstrings(__lowerCAmelCase ) class __snake_case ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : str , *_UpperCamelCase : int , **_UpperCamelCase : List[str]) ->Tuple: """simple docstring""" super().__init__(*_UpperCamelCase , **_UpperCamelCase) requires_backends(self , """vision""") self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING) def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : List[str]=None) ->Optional[int]: """simple docstring""" _lowerCamelCase : Optional[int] = {} if top_k is not None: _lowerCamelCase : str = top_k return {}, {}, postprocess_params def __call__( self : Optional[int] , _UpperCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_UpperCamelCase : Optional[int]) ->Dict: """simple docstring""" return super().__call__(_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[int]) ->str: """simple docstring""" _lowerCamelCase : Tuple = load_image(_UpperCamelCase) _lowerCamelCase : Any = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework) return model_inputs def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : Union[str, Any]) ->List[str]: """simple docstring""" _lowerCamelCase : Any = self.model(**_UpperCamelCase) return model_outputs def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : List[str]=5) ->str: """simple docstring""" if top_k > self.model.config.num_labels: _lowerCamelCase : Union[str, Any] = self.model.config.num_labels if self.framework == "pt": _lowerCamelCase : Optional[Any] = model_outputs.logits.softmax(-1)[0] _lowerCamelCase , _lowerCamelCase : Dict = probs.topk(_UpperCamelCase) elif self.framework == "tf": _lowerCamelCase : List[Any] = stable_softmax(model_outputs.logits , axis=-1)[0] _lowerCamelCase : List[Any] = tf.math.top_k(_UpperCamelCase , k=_UpperCamelCase) _lowerCamelCase , _lowerCamelCase : str = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(F"""Unsupported framework: {self.framework}""") _lowerCamelCase : str = scores.tolist() _lowerCamelCase : str = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase)]
15
1
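# How the pipeline class above is typically reached in practice, via the
# high-level factory; the default checkpoint choice is an assumption and the
# model is downloaded on first use.
from transformers import pipeline

classifier = pipeline("image-classification")
preds = classifier("http://images.cocodataset.org/val2017/000000039769.jpg", top_k=3)
for p in preds:
    print(f"{p['label']}: {p['score']:.3f}")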
import math def A__ ( __A ): '''simple docstring''' assert isinstance(__A , __A ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or not number % 2: # Negatives, 0, 1 and all even numbers are not primes return False _lowerCamelCase : List[Any] = range(3 , int(math.sqrt(__A ) + 1 ) , 2 ) return not any(not number % i for i in odd_numbers ) def A__ ( __A , __A=1 , **__A ): '''simple docstring''' _lowerCamelCase : Dict = factor * value _lowerCamelCase : str = value while not is_prime(__A ): value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1 if value == first_value_val: return next_prime(value + 1 , **__A ) return value
15
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class __snake_case ( unittest.TestCase ): '''simple docstring''' _snake_case = ViTImageProcessor if is_vision_available() else None @property def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = (3, 32, 128) _lowerCamelCase : str = tempfile.mkdtemp() # fmt: off _lowerCamelCase : Dict = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on _lowerCamelCase : str = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase)))) _lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp: fp.write(json.dumps(_UpperCamelCase) + """\n""") _lowerCamelCase : Any = { """do_normalize""": False, """do_resize""": True, """image_processor_type""": """ViTImageProcessor""", """resample""": 3, """size""": {"""height""": 32, """width""": 128}, } _lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , _UpperCamelCase) with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp: json.dump(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **_UpperCamelCase : Any) ->Tuple: """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Dict , **_UpperCamelCase : Optional[Any]) ->List[Any]: """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: """simple docstring""" shutil.rmtree(self.tmpdirname) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: """simple docstring""" _lowerCamelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta) _lowerCamelCase : int = Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1)) return image_input def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" _lowerCamelCase : List[str] = self.get_tokenizer() _lowerCamelCase : Tuple = self.get_image_processor() _lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) processor.save_pretrained(self.tmpdirname) _lowerCamelCase : int = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCamelCase) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab()) self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string()) 
self.assertIsInstance(processor.image_processor , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict: """simple docstring""" _lowerCamelCase : Dict = self.get_tokenizer() _lowerCamelCase : Optional[Any] = self.get_image_processor() _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) processor.save_pretrained(self.tmpdirname) _lowerCamelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""") _lowerCamelCase : Union[str, Any] = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0) _lowerCamelCase : Tuple = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCamelCase , padding_value=1.0) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any) ->int: """simple docstring""" _lowerCamelCase : int = self.get_image_processor() _lowerCamelCase : int = self.get_tokenizer() _lowerCamelCase : List[str] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : List[str] = self.prepare_image_inputs() _lowerCamelCase : Optional[int] = image_processor(_UpperCamelCase , return_tensors="""np""") _lowerCamelCase : int = processor(images=_UpperCamelCase , return_tensors="""np""") for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]: """simple docstring""" _lowerCamelCase : List[Any] = self.get_image_processor() _lowerCamelCase : int = self.get_tokenizer() _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : Optional[int] = """test""" _lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase) _lowerCamelCase : Dict = tokenizer(_UpperCamelCase) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.get_image_processor() _lowerCamelCase : List[Any] = self.get_tokenizer() _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : Any = """test""" _lowerCamelCase : List[str] = self.prepare_image_inputs() _lowerCamelCase : int = processor(text=_UpperCamelCase , images=_UpperCamelCase) self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""]) # test if it raises when no input is passed with pytest.raises(_UpperCamelCase): processor() def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.get_image_processor() _lowerCamelCase : List[str] = self.get_tokenizer() _lowerCamelCase : Dict = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] _lowerCamelCase : Any = processor.char_decode(_UpperCamelCase) _lowerCamelCase : Tuple = tokenizer.batch_decode(_UpperCamelCase) _lowerCamelCase : List[str] = [seq.replace(""" """ , """""") for seq in decoded_tok] 
self.assertListEqual(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str: """simple docstring""" _lowerCamelCase : Dict = self.get_image_processor() _lowerCamelCase : str = self.get_tokenizer() _lowerCamelCase : List[Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : int = None _lowerCamelCase : Union[str, Any] = self.prepare_image_inputs() _lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase , images=_UpperCamelCase) self.assertListEqual(list(inputs.keys()) , processor.model_input_names) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : List[str] = self.get_image_processor() _lowerCamelCase : int = self.get_tokenizer() _lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : Any = torch.randn(1 , 27 , 38) _lowerCamelCase : List[Any] = torch.randn(1 , 27 , 5_0257) _lowerCamelCase : List[str] = torch.randn(1 , 27 , 3_0522) _lowerCamelCase : int = processor.batch_decode([char_input, bpe_input, wp_input]) self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
15
1
import argparse
import collections
import os
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
lowerCAmelCase : Any ="src/transformers"
lowerCAmelCase : Tuple ="docs/source/en"
lowerCAmelCase : Any ="."


def A__ ( __A , __A , __A ):
    '''simple docstring'''
    with open(__A , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        _lowerCamelCase : Any = f.readlines()
    # Find the start prompt.
    _lowerCamelCase : Dict = 0
    while not lines[start_index].startswith(__A ):
        start_index += 1
    start_index += 1

    _lowerCamelCase : Union[str, Any] = start_index
    while not lines[end_index].startswith(__A ):
        end_index += 1
    end_index -= 1

    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines


# Add here suffixes that are used to identify models, separated by |
lowerCAmelCase : Optional[Any] ="Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
lowerCAmelCase : int =re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
lowerCAmelCase : List[str] =re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
lowerCAmelCase : int =re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase : int =direct_transformers_import(TRANSFORMERS_PATH)


def A__ ( __A ):
    '''simple docstring'''
    _lowerCamelCase : Tuple = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , __A )
    return [m.group(0 ) for m in matches]


def A__ ( __A , __A ):
    '''simple docstring'''
    _lowerCamelCase : Tuple = 2 if text == """✅""" or text == """❌""" else len(__A )
    _lowerCamelCase : List[Any] = (width - text_length) // 2
    _lowerCamelCase : List[str] = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent


def A__ ( ):
    '''simple docstring'''
    _lowerCamelCase : Optional[int] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    _lowerCamelCase : Dict = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    _lowerCamelCase : Tuple = {name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    _lowerCamelCase : int = collections.defaultdict(__A )
    _lowerCamelCase : int = collections.defaultdict(__A )
    _lowerCamelCase : Union[str, Any] = collections.defaultdict(__A )
    _lowerCamelCase : Optional[Any] = collections.defaultdict(__A )
    _lowerCamelCase : Any = collections.defaultdict(__A )
    # Let's look up all transformers objects (once).
for attr_name in dir(__A ): _lowerCamelCase : Tuple = None if attr_name.endswith("""Tokenizer""" ): _lowerCamelCase : int = slow_tokenizers _lowerCamelCase : str = attr_name[:-9] elif attr_name.endswith("""TokenizerFast""" ): _lowerCamelCase : List[Any] = fast_tokenizers _lowerCamelCase : Union[str, Any] = attr_name[:-13] elif _re_tf_models.match(__A ) is not None: _lowerCamelCase : List[str] = tf_models _lowerCamelCase : int = _re_tf_models.match(__A ).groups()[0] elif _re_flax_models.match(__A ) is not None: _lowerCamelCase : Union[str, Any] = flax_models _lowerCamelCase : Optional[Any] = _re_flax_models.match(__A ).groups()[0] elif _re_pt_models.match(__A ) is not None: _lowerCamelCase : Optional[int] = pt_models _lowerCamelCase : Any = _re_pt_models.match(__A ).groups()[0] if lookup_dict is not None: while len(__A ) > 0: if attr_name in model_name_to_prefix.values(): _lowerCamelCase : Tuple = True break # Try again after removing the last word in the name _lowerCamelCase : Optional[int] = """""".join(camel_case_split(__A )[:-1] ) # Let's build that table! _lowerCamelCase : str = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) _lowerCamelCase : Any = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). _lowerCamelCase : Tuple = [len(__A ) + 2 for c in columns] _lowerCamelCase : Optional[int] = max([len(__A ) for name in model_names] ) + 2 # Build the table per se _lowerCamelCase : str = """|""" + """|""".join([_center_text(__A , __A ) for c, w in zip(__A , __A )] ) + """|\n""" # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n" _lowerCamelCase : Any = {True: """✅""", False: """❌"""} for name in model_names: _lowerCamelCase : Any = model_name_to_prefix[name] _lowerCamelCase : int = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(__A , __A ) for l, w in zip(__A , __A )] ) + "|\n" return table def A__ ( __A=False ): '''simple docstring''' _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = _find_text_in_file( filename=os.path.join(__A , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , ) _lowerCamelCase : Any = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(__A , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( """The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" ) if __name__ == "__main__": lowerCAmelCase : Optional[int] =argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") lowerCAmelCase : Optional[int] =parser.parse_args() check_model_table(args.fix_and_overwrite)
15
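# Standalone versions of the two small helpers used by the table builder above
# (the record renames both to A__): a camel-case splitter and a fixed-width
# centering routine that counts the check/cross emoji as two characters wide.
import re

def camel_case_split(identifier: str) -> list:
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]

def center_text(text: str, width: int) -> str:
    text_length = 2 if text in ("✅", "❌") else len(text)
    left_indent = (width - text_length) // 2
    return " " * left_indent + text + " " * (width - text_length - left_indent)

print(camel_case_split("TFRegNetModel"))  # ['TF', 'Reg', 'Net', 'Model']
print(repr(center_text("ok", 8)))         # '   ok   '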
import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def A__ ( ): '''simple docstring''' _lowerCamelCase : Optional[int] = ArgumentParser( description=( """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes""" ) ) # Optional arguments for the launch helper parser.add_argument("""--num_cores""" , type=__A , default=1 , help="""Number of TPU cores to use (1 or 8).""" ) # positional parser.add_argument( """training_script""" , type=__A , help=( """The full path to the single TPU training """ """program/script to be launched in parallel, """ """followed by all the arguments for the """ """training script""" ) , ) # rest from the training program parser.add_argument("""training_script_args""" , nargs=__A ) return parser.parse_args() def A__ ( ): '''simple docstring''' _lowerCamelCase : List[str] = parse_args() # Import training_script as a module. _lowerCamelCase : List[Any] = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) _lowerCamelCase : Optional[Any] = script_fpath.stem _lowerCamelCase : Dict = importlib.import_module(__A ) # Patch sys.argv _lowerCamelCase : Union[str, Any] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
15
1
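# The launcher pattern in the TPU spawn script above, reduced to its plumbing:
# import the training script as a module, patch sys.argv, then hand its _mp_fn
# to the TPU spawner. xmp.spawn needs TPU hardware, so this sketch stops short
# of calling it.
import importlib
import sys
from pathlib import Path

def load_training_module(script_path: str, extra_args: list, num_cores: int = 8):
    script = Path(script_path)
    sys.path.append(str(script.parent.resolve()))
    module = importlib.import_module(script.stem)  # e.g. "run_glue" for run_glue.py
    sys.argv = [script_path, *extra_args, "--tpu_num_cores", str(num_cores)]
    return module  # caller would run xmp.spawn(module._mp_fn, args=(), nprocs=num_cores)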
from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig


lowerCAmelCase : int =logging.get_logger(__name__)

# General docstring
lowerCAmelCase : List[Any] ="RegNetConfig"

# Base docstring
lowerCAmelCase : Union[str, Any] ="facebook/regnet-y-040"
lowerCAmelCase : Optional[int] =[1, 1088, 7, 7]

# Image classification docstring
lowerCAmelCase : Dict ="facebook/regnet-y-040"
lowerCAmelCase : List[Any] ="tabby, tabby cat"

lowerCAmelCase : List[Any] =[
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class __snake_case ( tf.keras.layers.Layer ):
    '''simple docstring'''

    def __init__( self : List[Any] , _UpperCamelCase : int , _UpperCamelCase : int = 3 , _UpperCamelCase : int = 1 , _UpperCamelCase : int = 1 , _UpperCamelCase : Optional[str] = "relu" , **_UpperCamelCase : str , ) ->Optional[Any]:
        """simple docstring"""
        super().__init__(**_UpperCamelCase)
        # The padding and conv have been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        _lowerCamelCase : List[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2)
        _lowerCamelCase : Optional[int] = tf.keras.layers.ConvaD(
            filters=_UpperCamelCase , kernel_size=_UpperCamelCase , strides=_UpperCamelCase , padding="""VALID""" , groups=_UpperCamelCase , use_bias=_UpperCamelCase , name="""convolution""" , )
        _lowerCamelCase : Optional[Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="""normalization""")
        _lowerCamelCase : Optional[Any] = ACTaFN[activation] if activation is not None else tf.identity

    def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : int) ->int:
        """simple docstring"""
        _lowerCamelCase : List[Any] = self.convolution(self.padding(_UpperCamelCase))
        _lowerCamelCase : List[Any] = self.normalization(_UpperCamelCase)
        _lowerCamelCase : Optional[Any] = self.activation(_UpperCamelCase)
        return hidden_state


class __snake_case ( tf.keras.layers.Layer ):
    '''simple docstring'''

    def __init__( self : Optional[Any] , _UpperCamelCase : RegNetConfig , **_UpperCamelCase : Union[str, Any]) ->Union[str, Any]:
        """simple docstring"""
        super().__init__(**_UpperCamelCase)
        _lowerCamelCase : List[str] = config.num_channels
        _lowerCamelCase : Any = TFRegNetConvLayer(
            out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="""embedder""" , )

    def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : Dict) ->Any:
        """simple docstring"""
        _lowerCamelCase : Optional[int] = shape_list(_UpperCamelCase)[1]

        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                """Make sure that the channel dimension of the pixel values matches the one set in the configuration.""")

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels) _lowerCamelCase : Union[str, Any] = tf.transpose(_UpperCamelCase , perm=(0, 2, 3, 1)) _lowerCamelCase : Optional[int] = self.embedder(_UpperCamelCase) return hidden_state class __snake_case ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Any , _UpperCamelCase : int , _UpperCamelCase : int = 2 , **_UpperCamelCase : List[Any]) ->Dict: """simple docstring""" super().__init__(**_UpperCamelCase) _lowerCamelCase : Any = tf.keras.layers.ConvaD( filters=_UpperCamelCase , kernel_size=1 , strides=_UpperCamelCase , use_bias=_UpperCamelCase , name="""convolution""") _lowerCamelCase : Any = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="""normalization""") def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : tf.Tensor , _UpperCamelCase : bool = False) ->tf.Tensor: """simple docstring""" return self.normalization(self.convolution(_UpperCamelCase) , training=_UpperCamelCase) class __snake_case ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : List[Any] , _UpperCamelCase : int , _UpperCamelCase : int , **_UpperCamelCase : List[Any]) ->Dict: """simple docstring""" super().__init__(**_UpperCamelCase) _lowerCamelCase : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_UpperCamelCase , name="""pooler""") _lowerCamelCase : Optional[Any] = [ tf.keras.layers.ConvaD(filters=_UpperCamelCase , kernel_size=1 , activation="""relu""" , name="""attention.0"""), tf.keras.layers.ConvaD(filters=_UpperCamelCase , kernel_size=1 , activation="""sigmoid""" , name="""attention.2"""), ] def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : int) ->Dict: """simple docstring""" _lowerCamelCase : str = self.pooler(_UpperCamelCase) for layer_module in self.attention: _lowerCamelCase : str = layer_module(_UpperCamelCase) _lowerCamelCase : Dict = hidden_state * pooled return hidden_state class __snake_case ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : int , _UpperCamelCase : RegNetConfig , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int = 1 , **_UpperCamelCase : List[str]) ->List[Any]: """simple docstring""" super().__init__(**_UpperCamelCase) _lowerCamelCase : List[Any] = in_channels != out_channels or stride != 1 _lowerCamelCase : Optional[int] = max(1 , out_channels // config.groups_width) _lowerCamelCase : Dict = ( TFRegNetShortCut(_UpperCamelCase , stride=_UpperCamelCase , name="""shortcut""") if should_apply_shortcut else tf.keras.layers.Activation("""linear""" , name="""shortcut""") ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
_lowerCamelCase : Optional[Any] = [ TFRegNetConvLayer(_UpperCamelCase , kernel_size=1 , activation=config.hidden_act , name="""layer.0"""), TFRegNetConvLayer( _UpperCamelCase , stride=_UpperCamelCase , groups=_UpperCamelCase , activation=config.hidden_act , name="""layer.1"""), TFRegNetConvLayer(_UpperCamelCase , kernel_size=1 , activation=_UpperCamelCase , name="""layer.2"""), ] _lowerCamelCase : Dict = ACTaFN[config.hidden_act] def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : Any) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = hidden_state for layer_module in self.layers: _lowerCamelCase : Optional[Any] = layer_module(_UpperCamelCase) _lowerCamelCase : Optional[Any] = self.shortcut(_UpperCamelCase) hidden_state += residual _lowerCamelCase : Optional[Any] = self.activation(_UpperCamelCase) return hidden_state class __snake_case ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Dict , _UpperCamelCase : RegNetConfig , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int = 1 , **_UpperCamelCase : str) ->Any: """simple docstring""" super().__init__(**_UpperCamelCase) _lowerCamelCase : Optional[Any] = in_channels != out_channels or stride != 1 _lowerCamelCase : Optional[Any] = max(1 , out_channels // config.groups_width) _lowerCamelCase : Any = ( TFRegNetShortCut(_UpperCamelCase , stride=_UpperCamelCase , name="""shortcut""") if should_apply_shortcut else tf.keras.layers.Activation("""linear""" , name="""shortcut""") ) _lowerCamelCase : Any = [ TFRegNetConvLayer(_UpperCamelCase , kernel_size=1 , activation=config.hidden_act , name="""layer.0"""), TFRegNetConvLayer( _UpperCamelCase , stride=_UpperCamelCase , groups=_UpperCamelCase , activation=config.hidden_act , name="""layer.1"""), TFRegNetSELayer(_UpperCamelCase , reduced_channels=int(round(in_channels / 4)) , name="""layer.2"""), TFRegNetConvLayer(_UpperCamelCase , kernel_size=1 , activation=_UpperCamelCase , name="""layer.3"""), ] _lowerCamelCase : List[str] = ACTaFN[config.hidden_act] def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : Optional[Any]) ->Dict: """simple docstring""" _lowerCamelCase : str = hidden_state for layer_module in self.layers: _lowerCamelCase : Tuple = layer_module(_UpperCamelCase) _lowerCamelCase : Optional[int] = self.shortcut(_UpperCamelCase) hidden_state += residual _lowerCamelCase : Any = self.activation(_UpperCamelCase) return hidden_state class __snake_case ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Any , _UpperCamelCase : RegNetConfig , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int = 2 , _UpperCamelCase : int = 2 , **_UpperCamelCase : int) ->Optional[Any]: """simple docstring""" super().__init__(**_UpperCamelCase) _lowerCamelCase : Any = TFRegNetXLayer if config.layer_type == """x""" else TFRegNetYLayer _lowerCamelCase : List[Any] = [ # downsampling is done in the first layer with stride of 2 layer(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , stride=_UpperCamelCase , name="""layers.0"""), *[layer(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , name=F"""layers.{i+1}""") for i in range(depth - 1)], ] def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : str) ->int: """simple docstring""" for layer_module in self.layers: _lowerCamelCase : Union[str, Any] = layer_module(_UpperCamelCase) return hidden_state class __snake_case ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : int , _UpperCamelCase : RegNetConfig , **_UpperCamelCase 
: Any) ->Any:
        """simple docstring"""
        super().__init__(**_UpperCamelCase)
        _lowerCamelCase : Any = []

        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                _UpperCamelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="""stages.0""" , ))

        _lowerCamelCase : Tuple = zip(config.hidden_sizes , config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(_UpperCamelCase , config.depths[1:])):
            self.stages.append(TFRegNetStage(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , depth=_UpperCamelCase , name=F"""stages.{i+1}"""))

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : tf.Tensor , _UpperCamelCase : bool = False , _UpperCamelCase : bool = True) ->TFBaseModelOutputWithNoAttention:
        """simple docstring"""
        _lowerCamelCase : Any = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                _lowerCamelCase : Union[str, Any] = hidden_states + (hidden_state,)

            _lowerCamelCase : Tuple = stage_module(_UpperCamelCase)

        if output_hidden_states:
            _lowerCamelCase : List[Any] = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=_UpperCamelCase , hidden_states=_UpperCamelCase)


@keras_serializable
class __snake_case ( tf.keras.layers.Layer ):
    '''simple docstring'''

    _snake_case = RegNetConfig

    def __init__( self : Optional[Any] , _UpperCamelCase : List[str] , **_UpperCamelCase : Any) ->Tuple:
        """simple docstring"""
        super().__init__(**_UpperCamelCase)
        _lowerCamelCase : Optional[Any] = config
        _lowerCamelCase : Dict = TFRegNetEmbeddings(_UpperCamelCase , name="""embedder""")
        _lowerCamelCase : Optional[int] = TFRegNetEncoder(_UpperCamelCase , name="""encoder""")
        _lowerCamelCase : str = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_UpperCamelCase , name="""pooler""")

    @unpack_inputs
    def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : tf.Tensor , _UpperCamelCase : Optional[bool] = None , _UpperCamelCase : Optional[bool] = None , _UpperCamelCase : bool = False , ) ->TFBaseModelOutputWithPoolingAndNoAttention:
        """simple docstring"""
        _lowerCamelCase : Tuple = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        _lowerCamelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict

        _lowerCamelCase : Optional[int] = self.embedder(_UpperCamelCase , training=_UpperCamelCase)

        _lowerCamelCase : int = self.encoder(
            _UpperCamelCase , output_hidden_states=_UpperCamelCase , return_dict=_UpperCamelCase , training=_UpperCamelCase)

        _lowerCamelCase : Dict = encoder_outputs[0]
        _lowerCamelCase : Any = self.pooler(_UpperCamelCase)

        # Change to NCHW output format to have uniformity in the modules
        _lowerCamelCase : Optional[int] = tf.transpose(_UpperCamelCase , perm=(0, 3, 1, 2))
        _lowerCamelCase : Any = tf.transpose(_UpperCamelCase , perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            _lowerCamelCase : int = tuple([tf.transpose(_UpperCamelCase , perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=_UpperCamelCase , pooler_output=_UpperCamelCase , 
hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = RegNetConfig _snake_case = 'regnet' _snake_case = 'pixel_values' @property def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple: """simple docstring""" return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa)} lowerCAmelCase : Optional[int] =r"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n" lowerCAmelCase : int =r"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( 'The bare RegNet model outputting raw features without any specific head on top.' , __lowerCAmelCase , ) class __snake_case ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : List[Any] , _UpperCamelCase : RegNetConfig , *_UpperCamelCase : int , **_UpperCamelCase : List[Any]) ->List[str]: """simple docstring""" super().__init__(_UpperCamelCase , *_UpperCamelCase , **_UpperCamelCase) _lowerCamelCase : int = TFRegNetMainLayer(_UpperCamelCase , name="""regnet""") @unpack_inputs @add_start_docstrings_to_model_forward(_UpperCamelCase) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCamelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : tf.Tensor , _UpperCamelCase : Optional[bool] = None , _UpperCamelCase : Optional[bool] = None , _UpperCamelCase : Tuple=False , ) ->Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]: """simple docstring""" _lowerCamelCase : Any = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _lowerCamelCase : str = return_dict if return_dict is not None else self.config.use_return_dict _lowerCamelCase : List[str] = self.regnet( pixel_values=_UpperCamelCase , output_hidden_states=_UpperCamelCase , return_dict=_UpperCamelCase , training=_UpperCamelCase , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( '\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ' , __lowerCAmelCase , ) class __snake_case ( __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' def __init__( self : str , _UpperCamelCase : RegNetConfig , *_UpperCamelCase : Optional[Any] , **_UpperCamelCase : List[Any]) ->Dict: """simple docstring""" super().__init__(_UpperCamelCase , *_UpperCamelCase , **_UpperCamelCase) _lowerCamelCase : Any = config.num_labels _lowerCamelCase : Union[str, Any] = TFRegNetMainLayer(_UpperCamelCase , name="""regnet""") # classification head _lowerCamelCase : Union[str, Any] = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name="""classifier.1""") if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(_UpperCamelCase) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : tf.Tensor = None , _UpperCamelCase : tf.Tensor = None , _UpperCamelCase : bool = None , _UpperCamelCase : bool = None , _UpperCamelCase : List[Any]=False , ) ->Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: """simple docstring""" _lowerCamelCase : str = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _lowerCamelCase : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict _lowerCamelCase : List[str] = self.regnet( _UpperCamelCase , output_hidden_states=_UpperCamelCase , return_dict=_UpperCamelCase , training=_UpperCamelCase) _lowerCamelCase : Dict = outputs.pooler_output if return_dict else outputs[1] _lowerCamelCase : Any = self.classifier[0](_UpperCamelCase) _lowerCamelCase : List[Any] = self.classifier[1](_UpperCamelCase) _lowerCamelCase : List[Any] = None if labels is None else self.hf_compute_loss(labels=_UpperCamelCase , logits=_UpperCamelCase) if not return_dict: _lowerCamelCase : Any = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=_UpperCamelCase , logits=_UpperCamelCase , hidden_states=outputs.hidden_states)
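# --- Illustrative usage sketch (added; not part of the original snippet) ---
# The classes above are name-obfuscated; upstream in `transformers` they are
# TFRegNetModel and TFRegNetForImageClassification. A minimal inference
# sketch, assuming those upstream names and the public
# "facebook/regnet-y-040" checkpoint:
#
#   from transformers import AutoImageProcessor, TFRegNetForImageClassification
#   from PIL import Image
#
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=Image.open("cat.png"), return_tensors="tf")
#   logits = model(**inputs).logits  # shape (1, num_labels)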
15
def A__ ( __A , __A ): '''simple docstring''' _enforce_args(__A , __A ) if n == 0: return 0 _lowerCamelCase : Tuple = float("""-inf""" ) for i in range(1 , n + 1 ): _lowerCamelCase : Any = max( __A , prices[i - 1] + naive_cut_rod_recursive(n - i , __A ) ) return max_revue def A__ ( __A , __A ): '''simple docstring''' _enforce_args(__A , __A ) _lowerCamelCase : Optional[Any] = [float("""-inf""" ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(__A , __A , __A ) def A__ ( __A , __A , __A ): '''simple docstring''' if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: _lowerCamelCase : int = float("""-inf""" ) for i in range(1 , n + 1 ): _lowerCamelCase : Optional[Any] = max( __A , prices[i - 1] + _top_down_cut_rod_recursive(n - i , __A , __A ) , ) _lowerCamelCase : Optional[Any] = max_revenue return max_rev[n] def A__ ( __A , __A ): '''simple docstring''' _enforce_args(__A , __A ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. _lowerCamelCase : List[Any] = [float("""-inf""" ) for _ in range(n + 1 )] _lowerCamelCase : Any = 0 for i in range(1 , n + 1 ): _lowerCamelCase : Any = max_rev[i] for j in range(1 , i + 1 ): _lowerCamelCase : List[Any] = max(__A , prices[j - 1] + max_rev[i - j] ) _lowerCamelCase : int = max_revenue_i return max_rev[n] def A__ ( __A , __A ): '''simple docstring''' if n < 0: _lowerCamelCase : Any = F"""n must be greater than or equal to 0. Got n = {n}""" raise ValueError(__A ) if n > len(__A ): _lowerCamelCase : List[Any] = ( """Each integral piece of rod must have a corresponding price. """ F"""Got n = {n} but length of prices = {len(__A )}""" ) raise ValueError(__A ) def A__ ( ): '''simple docstring''' _lowerCamelCase : str = [6, 10, 12, 15, 20, 23] _lowerCamelCase : List[str] = len(__A ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. _lowerCamelCase : Tuple = 36 _lowerCamelCase : Any = top_down_cut_rod(__A , __A ) _lowerCamelCase : Dict = bottom_up_cut_rod(__A , __A ) _lowerCamelCase : List[str] = naive_cut_rod_recursive(__A , __A ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
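# --- Illustrative note (added; not part of the original snippet) ---
# All three solvers agree on the optimum (36 for the prices in the demo);
# they differ only in cost: naive_cut_rod_recursive is O(2^n), while
# top_down_cut_rod (memoized) and bottom_up_cut_rod both run in O(n^2).
# Small worked example: bottom_up_cut_rod(4, [1, 5, 8, 9]) == 10
# (cut the rod of length 4 into two pieces of length 2: 5 + 5).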
15
1
import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _snake_case = IFPipeline _snake_case = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'} _snake_case = TEXT_TO_IMAGE_BATCH_PARAMS _snake_case = PipelineTesterMixin.required_optional_params - {'latents'} def _SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]: """simple docstring""" return self._get_dummy_components() def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=0) ->Optional[Any]: """simple docstring""" if str(_UpperCamelCase).startswith("""mps"""): _lowerCamelCase : int = torch.manual_seed(_UpperCamelCase) else: _lowerCamelCase : List[Any] = torch.Generator(device=_UpperCamelCase).manual_seed(_UpperCamelCase) _lowerCamelCase : Dict = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""") def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1) def _SCREAMING_SNAKE_CASE ( self : int) ->Any: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2) def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]: """simple docstring""" self._test_save_load_local() def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3) @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]: """simple docstring""" _lowerCamelCase : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa) _lowerCamelCase : Tuple = IFSuperResolutionPipeline.from_pretrained( """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("""cuda""") _lowerCamelCase , _lowerCamelCase : str = 
pipe_a.encode_prompt("""anime turtle""" , device="""cuda""") del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() _lowerCamelCase : str = None _lowerCamelCase : str = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img _lowerCamelCase : Optional[Any] = IFImgaImgPipeline(**pipe_a.components) _lowerCamelCase : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if_imgaimg(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting _lowerCamelCase : Any = IFInpaintingPipeline(**pipe_a.components) _lowerCamelCase : Dict = IFInpaintingSuperResolutionPipeline(**pipe_a.components) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if_inpainting(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str) ->Tuple: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : Optional[int] = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Optional[Any] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Optional[int] = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 _lowerCamelCase : Dict = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : str = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : Any = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : str , _UpperCamelCase : List[Any]) ->Any: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Union[str, Any] = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Dict 
= pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Union[str, Any] = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _lowerCamelCase : List[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : List[str] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Optional[Any] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : List[Any] = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : str = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple) ->Optional[int]: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(1)).to(_UpperCamelCase) _lowerCamelCase : int = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Any = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Any = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _lowerCamelCase : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1)).to(_UpperCamelCase) _lowerCamelCase : List[str] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : Optional[Any] = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : Optional[Any] = 
torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def A__ ( ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
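# --- Illustrative note (added; not part of the original tests) ---
# Every stage in these integration tests follows the same budget-checking
# pattern: reset the CUDA peak statistics with the helper above, run the
# pipeline, then read torch.cuda.max_memory_allocated() and assert the peak
# stays under a fixed budget (e.g. 13 GB for the fp16 stage-I text-to-image
# pass, 4 GB for each super-resolution stage).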
15
from __future__ import annotations class __snake_case : '''simple docstring''' def __init__( self : Tuple , _UpperCamelCase : int = 0) ->str: """simple docstring""" _lowerCamelCase : Union[str, Any] = key def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]: """simple docstring""" assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Union[str, Any] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(_UpperCamelCase) ^ key) for ch in content] def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]: """simple docstring""" assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Optional[int] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(_UpperCamelCase) ^ key) for ch in content] def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str: """simple docstring""" assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : int = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned _lowerCamelCase : Any = """""" for ch in content: ans += chr(ord(_UpperCamelCase) ^ key) return ans def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str: """simple docstring""" assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : int = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned _lowerCamelCase : Optional[Any] = """""" for ch in content: ans += chr(ord(_UpperCamelCase) ^ key) return ans def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->bool: """simple docstring""" assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase) try: with open(_UpperCamelCase) as fin, open("""encrypt.out""" , """w+""") as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(_UpperCamelCase , _UpperCamelCase)) except OSError: return False return True def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : int) ->bool: """simple docstring""" assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase) try: with open(_UpperCamelCase) as fin, open("""decrypt.out""" , """w+""") as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(_UpperCamelCase , _UpperCamelCase)) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
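# --- Illustrative note (added; not part of the original snippet) ---
# Encryption and decryption are the same operation because XOR is an
# involution: (x ^ key) ^ key == x. Worked example with key = 67:
#
#   ord("h") ^ 67  ==  104 ^ 67  ==  43   -> chr(43) == "+"
#   43 ^ 67        ==  104                -> chr(104) == "h"
#
# which is why each encrypt/decrypt method pair above shares one body.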
15
1
import math class __snake_case : '''simple docstring''' def __init__( self : Union[str, Any] , _UpperCamelCase : Any=0) ->str: # a graph with Node 0,1,...,N-1 """simple docstring""" _lowerCamelCase : Optional[Any] = n _lowerCamelCase : Tuple = [ [math.inf for j in range(0 , _UpperCamelCase)] for i in range(0 , _UpperCamelCase) ] # adjacency matrix for weight _lowerCamelCase : List[Any] = [ [math.inf for j in range(0 , _UpperCamelCase)] for i in range(0 , _UpperCamelCase) ] # dp[i][j] stores minimum distance from i to j def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Optional[Any]) ->List[str]: """simple docstring""" _lowerCamelCase : List[Any] = w def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: """simple docstring""" for k in range(0 , self.n): for i in range(0 , self.n): for j in range(0 , self.n): _lowerCamelCase : Tuple = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j]) def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any]) ->Optional[int]: """simple docstring""" return self.dp[u][v] if __name__ == "__main__": lowerCAmelCase : Dict =Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
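# --- Illustrative note (added; not part of the original snippet) ---
# `show_min` returns the distance instead of printing it, so the two calls
# above produce no visible output. With the edge weights given, the expected
# values are:
#
#   show_min(1, 4) == 11   # 1 -> 3 (weight 5) then 3 -> 4 (weight 6)
#   show_min(0, 3) == 16   # 0 -> 2 (weight 9) then 2 -> 3 (weight 7)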
15
from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class __snake_case ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Dict , _UpperCamelCase : NestedDataStructureLike[PathLike] , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Optional[Features] = None , _UpperCamelCase : str = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : Optional[int] = None , **_UpperCamelCase : Tuple , ) ->Union[str, Any]: """simple docstring""" super().__init__( _UpperCamelCase , split=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase , streaming=_UpperCamelCase , num_proc=_UpperCamelCase , **_UpperCamelCase , ) _lowerCamelCase : List[Any] = path_or_paths if isinstance(_UpperCamelCase , _UpperCamelCase) else {self.split: path_or_paths} _lowerCamelCase : Any = Text( cache_dir=_UpperCamelCase , data_files=_UpperCamelCase , features=_UpperCamelCase , **_UpperCamelCase , ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]: """simple docstring""" if self.streaming: _lowerCamelCase : Tuple = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: _lowerCamelCase : List[Any] = None _lowerCamelCase : Any = None _lowerCamelCase : List[str] = None _lowerCamelCase : Dict = None self.builder.download_and_prepare( download_config=_UpperCamelCase , download_mode=_UpperCamelCase , verification_mode=_UpperCamelCase , base_path=_UpperCamelCase , num_proc=self.num_proc , ) _lowerCamelCase : Optional[int] = self.builder.as_dataset( split=self.split , verification_mode=_UpperCamelCase , in_memory=self.keep_in_memory) return dataset
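# --- Illustrative note (added; not part of the original snippet) ---
# Upstream this is the `datasets` text reader (it wraps the packaged "text"
# builder); roughly the same behaviour is reachable through the public API.
# A minimal sketch, assuming a local "corpus.txt" with one sample per line:
#
#   from datasets import load_dataset
#
#   ds = load_dataset("text", data_files="corpus.txt", split="train")
#   print(ds[0]["text"])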
15
1
import itertools
import math


def A__ ( __A ):
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(__A ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def A__ ( ):
    '''simple docstring'''
    _lowerCamelCase : Tuple = 2
    while True:
        if is_prime(__A ):
            yield num
        num += 1


def A__ ( __A = 10_001 ):
    '''simple docstring'''
    return next(itertools.islice(prime_generator() , nth - 1 , __A ) )


if __name__ == "__main__":
    print(F"""{solution() = }""")
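# --- Illustrative note (added; not part of the original snippet) ---
# Worked example: the first six primes are 2, 3, 5, 7, 11 and 13, so
# solution(6) == 13; the default solution() returns the 10,001st prime,
# 104743 (Project Euler problem 7).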
15
lowerCAmelCase : Tuple =0 # The first color of the flag.
lowerCAmelCase : Union[str, Any] =1 # The second color of the flag.
lowerCAmelCase : Any =2 # The third color of the flag.
lowerCAmelCase : List[str] =(red, white, blue)


def A__ ( __A ):
    '''simple docstring'''
    if not sequence:
        return []
    if len(__A ) == 1:
        return list(__A )
    _lowerCamelCase : int = 0
    _lowerCamelCase : Dict = len(__A ) - 1
    _lowerCamelCase : str = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            _lowerCamelCase , _lowerCamelCase : Tuple = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            _lowerCamelCase , _lowerCamelCase : str = sequence[high], sequence[mid]
            high -= 1
        else:
            _lowerCamelCase : int = F"""The elements inside the sequence must contain only {colors} values"""
            raise ValueError(__A )
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lowerCAmelCase : List[str] =input("Enter numbers separated by commas:\n").strip()
    lowerCAmelCase : Dict =[int(item.strip()) for item in user_input.split(",")]
    print(F"""{dutch_national_flag_sort(unsorted)}""")
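# --- Illustrative note (added; not part of the original snippet) ---
# The three pointers maintain the invariant
#   sequence[:low]     all 0s (red)
#   sequence[low:mid]  all 1s (white)
#   sequence[high+1:]  all 2s (blue)
# so every element is examined at most once: O(n) time, O(1) extra space.
# Example: dutch_national_flag_sort([2, 0, 1, 2, 0]) -> [0, 0, 1, 2, 2].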
15
1
from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = 42 _snake_case = 42 def __init__( self : Dict , _UpperCamelCase : UNetaDModel , _UpperCamelCase : ScoreSdeVeScheduler) ->Tuple: """simple docstring""" super().__init__() self.register_modules(unet=_UpperCamelCase , scheduler=_UpperCamelCase) @torch.no_grad() def __call__( self : Dict , _UpperCamelCase : int = 1 , _UpperCamelCase : int = 2000 , _UpperCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _UpperCamelCase : Optional[str] = "pil" , _UpperCamelCase : bool = True , **_UpperCamelCase : Optional[Any] , ) ->Union[ImagePipelineOutput, Tuple]: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.unet.config.sample_size _lowerCamelCase : Optional[int] = (batch_size, 3, img_size, img_size) _lowerCamelCase : int = self.unet _lowerCamelCase : Union[str, Any] = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase) * self.scheduler.init_noise_sigma _lowerCamelCase : List[str] = sample.to(self.device) self.scheduler.set_timesteps(_UpperCamelCase) self.scheduler.set_sigmas(_UpperCamelCase) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)): _lowerCamelCase : Dict = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device) # correction step for _ in range(self.scheduler.config.correct_steps): _lowerCamelCase : List[str] = self.unet(_UpperCamelCase , _UpperCamelCase).sample _lowerCamelCase : Optional[Any] = self.scheduler.step_correct(_UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase).prev_sample # prediction step _lowerCamelCase : str = model(_UpperCamelCase , _UpperCamelCase).sample _lowerCamelCase : Optional[Any] = self.scheduler.step_pred(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase , _lowerCamelCase : Union[str, Any] = output.prev_sample, output.prev_sample_mean _lowerCamelCase : Union[str, Any] = sample_mean.clamp(0 , 1) _lowerCamelCase : Tuple = sample.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": _lowerCamelCase : Any = self.numpy_to_pil(_UpperCamelCase) if not return_dict: return (sample,) return ImagePipelineOutput(images=_UpperCamelCase)
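# --- Illustrative usage sketch (added; not part of the original snippet) ---
# Upstream this is `ScoreSdeVePipeline` in `diffusers`; a minimal
# unconditional-generation sketch, assuming that name and the public
# "google/ncsnpp-church-256" checkpoint:
#
#   from diffusers import ScoreSdeVePipeline
#
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(num_inference_steps=2000).images[0]
#   image.save("sde_ve_sample.png")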
15
from __future__ import annotations

lowerCAmelCase : int =[]


def A__ ( __A , __A , __A ):
    '''simple docstring'''
    for i in range(len(__A ) ):
        if board[row][i] == 1:
            return False
    for i in range(len(__A ) ):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(__A , -1 , -1 ) , range(__A , -1 , -1 ) ):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(__A , -1 , -1 ) , range(__A , len(__A ) ) ):
        if board[i][j] == 1:
            return False
    return True


def A__ ( __A , __A ):
    '''simple docstring'''
    if row >= len(__A ):
        solution.append(__A )
        printboard(__A )
        print()
        return True
    for i in range(len(__A ) ):
        if is_safe(__A , __A , __A ):
            _lowerCamelCase : int = 1
            solve(__A , row + 1 )
            _lowerCamelCase : List[str] = 0
    return False


def A__ ( __A ):
    '''simple docstring'''
    for i in range(len(__A ) ):
        for j in range(len(__A ) ):
            if board[i][j] == 1:
                print("""Q""" , end=""" """ )
            else:
                print(""".""" , end=""" """ )
        print()


# n=int(input("The no. of queens"))
lowerCAmelCase : int =8
lowerCAmelCase : Union[str, Any] =[[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions is:", len(solution))
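# --- Illustrative note (added; not part of the original snippet) ---
# For the 8x8 board configured above, the backtracking search prints each
# placement as it is found and collects all 92 distinct solutions, so the
# final line reports a count of 92.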
15
1
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__) lowerCAmelCase : Any ={"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} lowerCAmelCase : Tuple ={ "vocab_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } lowerCAmelCase : Union[str, Any] ={ "vocab_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } lowerCAmelCase : Optional[Any] ={ "vocab_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json" ), }, } lowerCAmelCase : List[Any] ={ "facebook/dpr-ctx_encoder-single-nq-base": 512, "facebook/dpr-ctx_encoder-multiset-base": 512, } lowerCAmelCase : Tuple ={ "facebook/dpr-question_encoder-single-nq-base": 512, "facebook/dpr-question_encoder-multiset-base": 512, } lowerCAmelCase : Dict ={ "facebook/dpr-reader-single-nq-base": 512, "facebook/dpr-reader-multiset-base": 512, } lowerCAmelCase : List[Any] ={ "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True}, } lowerCAmelCase : Optional[Any] ={ "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True}, } lowerCAmelCase : int ={ "facebook/dpr-reader-single-nq-base": {"do_lower_case": True}, "facebook/dpr-reader-multiset-base": {"do_lower_case": True}, } class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = VOCAB_FILES_NAMES _snake_case = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP _snake_case = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION _snake_case 
= DPRContextEncoderTokenizer class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = VOCAB_FILES_NAMES _snake_case = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP _snake_case = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION _snake_case = DPRQuestionEncoderTokenizer lowerCAmelCase : str =collections.namedtuple( "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"] ) lowerCAmelCase : List[Any] =collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"]) lowerCAmelCase : List[Any] =r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. 
This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n " @add_start_docstrings(__lowerCAmelCase ) class __snake_case : '''simple docstring''' def __call__( self : Tuple , _UpperCamelCase : Dict , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Union[bool, str] = False , _UpperCamelCase : Union[bool, str] = False , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , _UpperCamelCase : Optional[bool] = None , **_UpperCamelCase : List[Any] , ) ->BatchEncoding: """simple docstring""" if titles is None and texts is None: return super().__call__( _UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , return_tensors=_UpperCamelCase , return_attention_mask=_UpperCamelCase , **_UpperCamelCase , ) elif titles is None or texts is None: _lowerCamelCase : int = titles if texts is None else texts return super().__call__( _UpperCamelCase , _UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , return_tensors=_UpperCamelCase , return_attention_mask=_UpperCamelCase , **_UpperCamelCase , ) _lowerCamelCase : Union[str, Any] = titles if not isinstance(_UpperCamelCase , _UpperCamelCase) else [titles] _lowerCamelCase : List[str] = texts if not isinstance(_UpperCamelCase , _UpperCamelCase) else [texts] _lowerCamelCase : Tuple = len(_UpperCamelCase) _lowerCamelCase : List[str] = questions if not isinstance(_UpperCamelCase , _UpperCamelCase) else [questions] * n_passages assert len(_UpperCamelCase) == len( _UpperCamelCase), F"""There should be as many titles than texts but got {len(_UpperCamelCase)} titles and {len(_UpperCamelCase)} texts.""" _lowerCamelCase : Any = super().__call__(_UpperCamelCase , _UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase)["""input_ids"""] _lowerCamelCase : Optional[int] = super().__call__(_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase)["""input_ids"""] _lowerCamelCase : str = { 
"""input_ids""": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(_UpperCamelCase , _UpperCamelCase) ] } if return_attention_mask is not False: _lowerCamelCase : List[str] = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids]) _lowerCamelCase : str = attention_mask return self.pad(_UpperCamelCase , padding=_UpperCamelCase , max_length=_UpperCamelCase , return_tensors=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : BatchEncoding , _UpperCamelCase : DPRReaderOutput , _UpperCamelCase : int = 16 , _UpperCamelCase : int = 64 , _UpperCamelCase : int = 4 , ) ->List[DPRSpanPrediction]: """simple docstring""" _lowerCamelCase : Dict = reader_input["""input_ids"""] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = reader_output[:3] _lowerCamelCase : Optional[int] = len(_UpperCamelCase) _lowerCamelCase : List[Any] = sorted(range(_UpperCamelCase) , reverse=_UpperCamelCase , key=relevance_logits.__getitem__) _lowerCamelCase : List[DPRReaderOutput] = [] for doc_id in sorted_docs: _lowerCamelCase : List[Any] = list(input_ids[doc_id]) # assuming question & title information is at the beginning of the sequence _lowerCamelCase : List[str] = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: _lowerCamelCase : Union[str, Any] = sequence_ids.index(self.pad_token_id) else: _lowerCamelCase : Dict = len(_UpperCamelCase) _lowerCamelCase : List[Any] = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_UpperCamelCase , top_spans=_UpperCamelCase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_UpperCamelCase , start_index=_UpperCamelCase , end_index=_UpperCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1]) , )) if len(_UpperCamelCase) >= num_spans: break return nbest_spans_predictions[:num_spans] def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : List[int] , _UpperCamelCase : List[int] , _UpperCamelCase : int , _UpperCamelCase : int , ) ->List[DPRSpanPrediction]: """simple docstring""" _lowerCamelCase : Optional[int] = [] for start_index, start_score in enumerate(_UpperCamelCase): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]): scores.append(((start_index, start_index + answer_length), start_score + end_score)) _lowerCamelCase : int = sorted(_UpperCamelCase , key=lambda _UpperCamelCase: x[1] , reverse=_UpperCamelCase) _lowerCamelCase : Optional[Any] = [] for (start_index, end_index), score in scores: assert start_index <= end_index, F"""Wrong span indices: [{start_index}:{end_index}]""" _lowerCamelCase : str = end_index - start_index + 1 assert length <= max_answer_length, F"""Span is too long: {length} > {max_answer_length}""" if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals): continue 
chosen_span_intervals.append((start_index, end_index)) if len(_UpperCamelCase) == top_spans: break return chosen_span_intervals @add_end_docstrings(__lowerCAmelCase ) class __snake_case ( __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' _snake_case = VOCAB_FILES_NAMES _snake_case = READER_PRETRAINED_VOCAB_FILES_MAP _snake_case = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case = READER_PRETRAINED_INIT_CONFIGURATION _snake_case = ['input_ids', 'attention_mask'] _snake_case = DPRReaderTokenizer
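# --- Illustrative usage sketch (added; not part of the original snippet) ---
# Upstream this class is `DPRReaderTokenizerFast`; a minimal sketch, assuming
# that name and the public "facebook/dpr-reader-single-nq-base" checkpoint:
#
#   from transformers import DPRReaderTokenizerFast
#
#   tok = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tok(questions="What is love?",
#                 titles="Haddaway",
#                 texts="'What Is Love' is a song recorded by Haddaway",
#                 return_tensors="pt")
#   # encoded["input_ids"] has shape (n_passages, sequence_length)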
15
import argparse import os import torch from transformers import ( XLNetConfig, XLNetForQuestionAnswering, XLNetForSequenceClassification, XLNetLMHeadModel, load_tf_weights_in_xlnet, ) from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging lowerCAmelCase : int ={ "cola": 2, "mnli": 3, "mrpc": 2, "sst-2": 2, "sts-b": 1, "qqp": 2, "qnli": 2, "rte": 2, "wnli": 2, } logging.set_verbosity_info() def A__ ( __A , __A , __A , __A=None ): '''simple docstring''' # Initialise PyTorch model _lowerCamelCase : Tuple = XLNetConfig.from_json_file(__A ) _lowerCamelCase : List[Any] = finetuning_task.lower() if finetuning_task is not None else """""" if finetuning_task in GLUE_TASKS_NUM_LABELS: print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" ) _lowerCamelCase : int = finetuning_task _lowerCamelCase : Union[str, Any] = GLUE_TASKS_NUM_LABELS[finetuning_task] _lowerCamelCase : int = XLNetForSequenceClassification(__A ) elif "squad" in finetuning_task: _lowerCamelCase : Dict = finetuning_task _lowerCamelCase : Optional[Any] = XLNetForQuestionAnswering(__A ) else: _lowerCamelCase : Any = XLNetLMHeadModel(__A ) # Load weights from tf checkpoint load_tf_weights_in_xlnet(__A , __A , __A ) # Save pytorch-model _lowerCamelCase : Optional[Any] = os.path.join(__A , __A ) _lowerCamelCase : Any = os.path.join(__A , __A ) print(F"""Save PyTorch model to {os.path.abspath(__A )}""" ) torch.save(model.state_dict() , __A ) print(F"""Save configuration file to {os.path.abspath(__A )}""" ) with open(__A , """w""" , encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": lowerCAmelCase : Dict =argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--xlnet_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained XLNet model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the folder to store the PyTorch model or dataset/vocab.", ) parser.add_argument( "--finetuning_task", default=None, type=str, help="Name of a task on which the XLNet TensorFlow model was fine-tuned", ) lowerCAmelCase : Union[str, Any] =parser.parse_args() print(args) convert_xlnet_checkpoint_to_pytorch( args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task )
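# --- Illustrative usage sketch (added; not part of the original script) ---
# A hypothetical invocation (the script name and all paths are placeholders;
# the flags come from the argparse definition above):
#
#   python convert_xlnet_checkpoint.py \
#       --tf_checkpoint_path ./xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-pytorch \
#       --finetuning_task sst-2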
15
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase : List[str] ={ "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"], "feature_extraction_whisper": ["WhisperFeatureExtractor"], "processing_whisper": ["WhisperProcessor"], "tokenization_whisper": ["WhisperTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : str =["WhisperTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Any =[ "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST", "WhisperForConditionalGeneration", "WhisperModel", "WhisperPreTrainedModel", "WhisperForAudioClassification", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Union[str, Any] =[ "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFWhisperForConditionalGeneration", "TFWhisperModel", "TFWhisperPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[Any] =[ "FlaxWhisperForConditionalGeneration", "FlaxWhisperModel", "FlaxWhisperPreTrainedModel", "FlaxWhisperForAudioClassification", ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys lowerCAmelCase : Union[str, Any] =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
15
def A__ ( __A ):
    '''simple docstring'''
    _lowerCamelCase : Tuple = 0
    for ch in input_str:
        _lowerCamelCase : Optional[Any] = ord(__A )
        _lowerCamelCase : List[str] = pow(2 , __A )
        # If the bit for this character's code point is already set,
        # the character has been seen before
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
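# --- Illustrative note (added; not part of the original snippet) ---
# The function sets one bit per code point in an arbitrarily large int, so
# it also covers non-ASCII input. Expected behaviour:
#
#   A__("abcde") -> True    # all characters distinct
#   A__("abcda") -> False   # 'a' repeats; its bit is already set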
15
1
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __snake_case : '''simple docstring''' def __init__( self : Any , _UpperCamelCase : Tuple , _UpperCamelCase : Dict=13 , _UpperCamelCase : Optional[int]=32 , _UpperCamelCase : Union[str, Any]=3 , _UpperCamelCase : Any=4 , _UpperCamelCase : int=[10, 20, 30, 40] , _UpperCamelCase : Optional[int]=[2, 2, 3, 2] , _UpperCamelCase : List[Any]=True , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : List[Any]=37 , _UpperCamelCase : List[Any]="gelu" , _UpperCamelCase : Any=10 , _UpperCamelCase : int=0.0_2 , _UpperCamelCase : Union[str, Any]=["stage2", "stage3", "stage4"] , _UpperCamelCase : Tuple=3 , _UpperCamelCase : Optional[Any]=None , ) ->Optional[Any]: """simple docstring""" _lowerCamelCase : List[Any] = parent _lowerCamelCase : Any = batch_size _lowerCamelCase : List[str] = image_size _lowerCamelCase : Any = num_channels _lowerCamelCase : Optional[Any] = num_stages _lowerCamelCase : str = hidden_sizes _lowerCamelCase : List[str] = depths _lowerCamelCase : Union[str, Any] = is_training _lowerCamelCase : Dict = use_labels _lowerCamelCase : Union[str, Any] = intermediate_size _lowerCamelCase : Optional[int] = hidden_act _lowerCamelCase : str = type_sequence_label_size _lowerCamelCase : Any = initializer_range _lowerCamelCase : str = out_features _lowerCamelCase : int = num_labels _lowerCamelCase : List[Any] = scope _lowerCamelCase : List[Any] = num_stages def _SCREAMING_SNAKE_CASE ( self : str) ->List[Any]: """simple docstring""" _lowerCamelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _lowerCamelCase : Any = None if self.use_labels: _lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size) _lowerCamelCase : List[Any] = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : str) ->List[str]: """simple docstring""" return ConvNextConfig( num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , ) def _SCREAMING_SNAKE_CASE ( self : str) ->List[Any]: """simple docstring""" return UperNetConfig( backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_UpperCamelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=_UpperCamelCase , loss_ignore_index=255 , num_labels=self.num_labels , ) def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : int , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int]) ->Tuple: """simple docstring""" _lowerCamelCase : 
List[Any] = UperNetForSemanticSegmentation(config=_UpperCamelCase) model.to(_UpperCamelCase) model.eval() _lowerCamelCase : Tuple = model(_UpperCamelCase) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size)) def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs() ( ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ) : List[Any] = config_and_inputs _lowerCamelCase : str = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _snake_case = (UperNetForSemanticSegmentation,) if is_torch_available() else () _snake_case = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {} _snake_case = False _snake_case = False _snake_case = False _snake_case = False _snake_case = False _snake_case = False def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]: """simple docstring""" _lowerCamelCase : Union[str, Any] = UperNetModelTester(self) _lowerCamelCase : Dict = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37) def _SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _SCREAMING_SNAKE_CASE ( self : Any) ->List[str]: """simple docstring""" return def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict: """simple docstring""" _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Tuple = model_class(_UpperCamelCase) _lowerCamelCase : Optional[Any] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : Optional[int] = [*signature.parameters.keys()] _lowerCamelCase : List[str] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCamelCase) @unittest.skip(reason="""UperNet does not use inputs_embeds""") def _SCREAMING_SNAKE_CASE ( self : Dict) ->str: """simple docstring""" pass @unittest.skip(reason="""UperNet does not support input and output embeddings""") def _SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]: """simple docstring""" pass @unittest.skip(reason="""UperNet does not have a base model""") def _SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]: """simple docstring""" pass @unittest.skip(reason="""UperNet does not have a base model""") def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]: """simple docstring""" pass @require_torch_multi_gpu @unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""") def _SCREAMING_SNAKE_CASE ( self : str) ->Tuple: """simple docstring""" pass 
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""") def _SCREAMING_SNAKE_CASE ( self : Tuple) ->int: """simple docstring""" pass def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->str: """simple docstring""" def check_hidden_states_output(_UpperCamelCase : Optional[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any]): _lowerCamelCase : List[Any] = model_class(_UpperCamelCase) model.to(_UpperCamelCase) model.eval() with torch.no_grad(): _lowerCamelCase : Tuple = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase)) _lowerCamelCase : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _lowerCamelCase : Dict = self.model_tester.num_stages self.assertEqual(len(_UpperCamelCase) , expected_num_stages + 1) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _lowerCamelCase , _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Optional[int] = True check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCamelCase : str = True check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Dict) ->List[Any]: """simple docstring""" _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase : Optional[Any] = _config_zero_init(_UpperCamelCase) _lowerCamelCase : int = _config_zero_init(configs_no_init.backbone_config) for model_class in self.all_model_classes: _lowerCamelCase : List[Any] = model_class(config=_UpperCamelCase) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip(reason="""UperNet does not have tied weights""") def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[Any]: """simple docstring""" pass @slow def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple: """simple docstring""" for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Optional[Any] = UperNetForSemanticSegmentation.from_pretrained(_UpperCamelCase) self.assertIsNotNone(_UpperCamelCase) def A__ ( ): '''simple docstring''' _lowerCamelCase : List[str] = hf_hub_download( repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" ) _lowerCamelCase : Optional[int] = Image.open(__A ).convert("""RGB""" ) return image @require_torch @require_vision @slow class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Dict: """simple docstring""" _lowerCamelCase : Optional[int] = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""") _lowerCamelCase : str = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""").to(_UpperCamelCase) _lowerCamelCase : Union[str, Any] = prepare_img() _lowerCamelCase : Optional[Any] = processor(images=_UpperCamelCase , return_tensors="""pt""").to(_UpperCamelCase) with torch.no_grad(): _lowerCamelCase : Any = model(**_UpperCamelCase) _lowerCamelCase 
: List[Any] = torch.Size((1, model.config.num_labels, 512, 512)) self.assertEqual(outputs.logits.shape , _UpperCamelCase) _lowerCamelCase : Optional[Any] = torch.tensor( [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]]).to(_UpperCamelCase) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _UpperCamelCase , atol=1E-4)) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[int]: """simple docstring""" _lowerCamelCase : Dict = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""") _lowerCamelCase : Tuple = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""").to(_UpperCamelCase) _lowerCamelCase : Dict = prepare_img() _lowerCamelCase : str = processor(images=_UpperCamelCase , return_tensors="""pt""").to(_UpperCamelCase) with torch.no_grad(): _lowerCamelCase : str = model(**_UpperCamelCase) _lowerCamelCase : List[str] = torch.Size((1, model.config.num_labels, 512, 512)) self.assertEqual(outputs.logits.shape , _UpperCamelCase) _lowerCamelCase : List[str] = torch.tensor( [[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]]).to(_UpperCamelCase) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _UpperCamelCase , atol=1E-4))
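# Editor's addition: a minimal inference sketch for the UperNet checkpoints
# exercised by the integration tests above. The local image path is a
# placeholder, and the argmax post-processing is a common convention rather
# than something the test file itself prescribes.
import torch
from PIL import Image
from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")

image = Image.open("ADE_val_00000001.jpg").convert("RGB")  # placeholder path
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# logits: (batch, num_labels, height, width); per-pixel class ids via argmax
segmentation_map = outputs.logits.argmax(dim=1)[0]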
15
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class __snake_case ( unittest.TestCase ):
    '''simple docstring'''

    @slow
    def _SCREAMING_SNAKE_CASE ( self : Tuple) ->int:
        """simple docstring"""
        _lowerCamelCase : Tuple = XLMRobertaModel.from_pretrained("""xlm-roberta-base""")
        _lowerCamelCase : Optional[int] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
        # The dog is cute and lives in the garden house
        _lowerCamelCase : Optional[Any] = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        _lowerCamelCase : str = torch.tensor(
            [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            _lowerCamelCase : List[str] = model(_UpperCamelCase)["""last_hidden_state"""].detach()
        self.assertEqual(output.shape , _UpperCamelCase)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1E-3))

    @slow
    def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
        """simple docstring"""
        _lowerCamelCase : List[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""")
        _lowerCamelCase : Optional[Any] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
        # The dog is cute and lives in the garden house
        _lowerCamelCase : str = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        _lowerCamelCase : Union[str, Any] = torch.tensor(
            [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            _lowerCamelCase : int = model(_UpperCamelCase)["""last_hidden_state"""].detach()
        self.assertEqual(output.shape , _UpperCamelCase)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1E-3))
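# Editor's addition: a hedged sketch of pulling sentence embeddings out of the
# XLM-R checkpoints tested above. Mean pooling over the attention mask is a
# common convention, not something the test file itself prescribes.
import torch
from transformers import AutoTokenizer, XLMRobertaModel

tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
model = XLMRobertaModel.from_pretrained("xlm-roberta-base").eval()

inputs = tokenizer("The dog is cute and lives in the garden house", return_tensors="pt")
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state  # (1, seq_len, 768)

mask = inputs["attention_mask"].unsqueeze(-1)
sentence_embedding = (hidden * mask).sum(dim=1) / mask.sum(dim=1)  # (1, 768)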
15
1
import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) lowerCAmelCase : List[Any] ="hf-internal-testing/tiny-random-bert" lowerCAmelCase : List[Any] =os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert") lowerCAmelCase : List[str] ="9b8c223d42b2188cb49d29af482996f9d0f3e5a6" class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : Tuple = cached_file(_UpperCamelCase , _UpperCamelCase) # Should have downloaded the file in here self.assertTrue(os.path.isdir(_UpperCamelCase)) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(_UpperCamelCase , _UpperCamelCase))) with open(os.path.join(_UpperCamelCase , """refs""" , """main""")) as f: _lowerCamelCase : Optional[int] = f.read() self.assertEqual(_UpperCamelCase , os.path.join(_UpperCamelCase , """snapshots""" , _UpperCamelCase , _UpperCamelCase)) self.assertTrue(os.path.isfile(_UpperCamelCase)) # File is cached at the same place the second time. _lowerCamelCase : Optional[int] = cached_file(_UpperCamelCase , _UpperCamelCase) self.assertEqual(_UpperCamelCase , _UpperCamelCase) # Using a specific revision to test the full commit hash. _lowerCamelCase : str = cached_file(_UpperCamelCase , _UpperCamelCase , revision="""9b8c223""") self.assertEqual(_UpperCamelCase , os.path.join(_UpperCamelCase , """snapshots""" , _UpperCamelCase , _UpperCamelCase)) def _SCREAMING_SNAKE_CASE ( self : int) ->Union[str, Any]: """simple docstring""" with self.assertRaisesRegex(_UpperCamelCase , """is not a valid model identifier"""): _lowerCamelCase : List[str] = cached_file("""tiny-random-bert""" , _UpperCamelCase) with self.assertRaisesRegex(_UpperCamelCase , """is not a valid git identifier"""): _lowerCamelCase : List[str] = cached_file(_UpperCamelCase , _UpperCamelCase , revision="""aaaa""") with self.assertRaisesRegex(_UpperCamelCase , """does not appear to have a file named"""): _lowerCamelCase : int = cached_file(_UpperCamelCase , """conf""") def _SCREAMING_SNAKE_CASE ( self : int) ->str: """simple docstring""" with self.assertRaisesRegex(_UpperCamelCase , """does not appear to have a file named"""): _lowerCamelCase : List[str] = cached_file(_UpperCamelCase , """conf""") with open(os.path.join(_UpperCamelCase , """refs""" , """main""")) as f: _lowerCamelCase : Dict = f.read() self.assertTrue(os.path.isfile(os.path.join(_UpperCamelCase , """.no_exist""" , _UpperCamelCase , """conf"""))) _lowerCamelCase : List[str] = cached_file(_UpperCamelCase , """conf""" , _raise_exceptions_for_missing_entries=_UpperCamelCase) self.assertIsNone(_UpperCamelCase) _lowerCamelCase : str = cached_file(_UpperCamelCase , """conf""" , local_files_only=_UpperCamelCase , _raise_exceptions_for_missing_entries=_UpperCamelCase) self.assertIsNone(_UpperCamelCase) _lowerCamelCase : List[Any] = mock.Mock() _lowerCamelCase : List[Any] = 500 _lowerCamelCase : Any = {} _lowerCamelCase : List[Any] = HTTPError _lowerCamelCase : Any = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. 
with mock.patch("""requests.Session.request""" , return_value=_UpperCamelCase) as mock_head: _lowerCamelCase : Optional[int] = cached_file(_UpperCamelCase , """conf""" , _raise_exceptions_for_connection_errors=_UpperCamelCase) self.assertIsNone(_UpperCamelCase) # This check we did call the fake head request mock_head.assert_called() def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Any: """simple docstring""" self.assertTrue(has_file("""hf-internal-testing/tiny-bert-pt-only""" , _UpperCamelCase)) self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , _UpperCamelCase)) self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , _UpperCamelCase)) def _SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]: """simple docstring""" self.assertIsNone(get_file_from_repo("""bert-base-cased""" , """ahah.txt""")) # The function raises if the repository does not exist. with self.assertRaisesRegex(_UpperCamelCase , """is not a valid model identifier"""): get_file_from_repo("""bert-base-case""" , _UpperCamelCase) # The function raises if the revision does not exist. with self.assertRaisesRegex(_UpperCamelCase , """is not a valid git identifier"""): get_file_from_repo("""bert-base-cased""" , _UpperCamelCase , revision="""ahaha""") _lowerCamelCase : Union[str, Any] = get_file_from_repo("""bert-base-cased""" , _UpperCamelCase) # The name is the cached name which is not very easy to test, so instead we load the content. _lowerCamelCase : Tuple = json.loads(open(_UpperCamelCase , """r""").read()) self.assertEqual(config["""hidden_size"""] , 768) def _SCREAMING_SNAKE_CASE ( self : str) ->List[str]: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _lowerCamelCase : List[str] = Path(_UpperCamelCase) / """a.txt""" filename.touch() self.assertEqual(get_file_from_repo(_UpperCamelCase , """a.txt""") , str(_UpperCamelCase)) self.assertIsNone(get_file_from_repo(_UpperCamelCase , """b.txt"""))
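# Editor's addition: the cache helpers tested above in their typical use.
from transformers.utils import cached_file, get_file_from_repo

# cached_file downloads on first use and raises for missing repos/files unless
# the _raise_exceptions_* flags seen in the tests are disabled.
resolved = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
print(resolved)  # .../snapshots/<commit-sha>/config.json

# get_file_from_repo returns None instead of raising when the file is absent.
assert get_file_from_repo("bert-base-cased", "ahah.txt") is None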
15
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase : Tuple =logging.get_logger(__name__) class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = ['pixel_values'] def __init__( self : Optional[Any] , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Union[int, float] = 1 / 255 , _UpperCamelCase : bool = True , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , **_UpperCamelCase : str , ) ->None: """simple docstring""" super().__init__(**_UpperCamelCase) _lowerCamelCase : Tuple = size if size is not None else {"""height""": 256, """width""": 256} _lowerCamelCase : Optional[Any] = get_size_dict(_UpperCamelCase) _lowerCamelCase : Any = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} _lowerCamelCase : Any = get_size_dict(_UpperCamelCase , param_name="""crop_size""") _lowerCamelCase : int = do_resize _lowerCamelCase : int = size _lowerCamelCase : Optional[int] = resample _lowerCamelCase : int = do_center_crop _lowerCamelCase : Optional[Any] = crop_size _lowerCamelCase : Union[str, Any] = do_rescale _lowerCamelCase : List[str] = rescale_factor _lowerCamelCase : List[Any] = do_normalize _lowerCamelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray: """simple docstring""" _lowerCamelCase : Dict = get_size_dict(_UpperCamelCase) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""") return resize( _UpperCamelCase , size=(size["""height"""], size["""width"""]) , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : List[str] , ) ->np.ndarray: """simple docstring""" _lowerCamelCase : Union[str, Any] = get_size_dict(_UpperCamelCase) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. 
Got {size.keys()}""") return center_crop(_UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[int, float] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->str: """simple docstring""" return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray: """simple docstring""" return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : ImageInput , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Tuple=None , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : bool = None , _UpperCamelCase : float = None , _UpperCamelCase : bool = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , _UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCamelCase : List[Any] , ) ->PIL.Image.Image: """simple docstring""" _lowerCamelCase : Any = do_resize if do_resize is not None else self.do_resize _lowerCamelCase : List[str] = resample if resample is not None else self.resample _lowerCamelCase : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop _lowerCamelCase : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale _lowerCamelCase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCamelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize _lowerCamelCase : int = image_mean if image_mean is not None else self.image_mean _lowerCamelCase : Dict = image_std if image_std is not None else self.image_std _lowerCamelCase : Optional[Any] = size if size is not None else self.size _lowerCamelCase : Optional[int] = get_size_dict(_UpperCamelCase) _lowerCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size _lowerCamelCase : Dict = get_size_dict(_UpperCamelCase , param_name="""crop_size""") _lowerCamelCase : int = make_list_of_images(_UpperCamelCase) if not valid_images(_UpperCamelCase): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""") if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""") if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""") if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""") if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""") # All transformations expect numpy arrays. 
_lowerCamelCase : Union[str, Any] = [to_numpy_array(_UpperCamelCase) for image in images] if do_resize: _lowerCamelCase : Any = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase) for image in images] if do_center_crop: _lowerCamelCase : str = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase) for image in images] if do_rescale: _lowerCamelCase : Optional[int] = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase) for image in images] if do_normalize: _lowerCamelCase : List[str] = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase) for image in images] _lowerCamelCase : List[str] = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase) for image in images] _lowerCamelCase : int = {"""pixel_values""": images} return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase)
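# Editor's addition: how a processor like the (name-obfuscated) class above is
# typically driven. `__snake_case` stands in for the real class name; with the
# defaults defined in __init__ this resizes to 256, center-crops to 224,
# rescales by 1/255 and normalizes.
import numpy as np
from PIL import Image

processor = __snake_case()
image = Image.fromarray((np.random.rand(480, 640, 3) * 255).astype(np.uint8))
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)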
15
1
import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __snake_case : '''simple docstring''' def __init__( self : Optional[int] , _UpperCamelCase : Tuple , _UpperCamelCase : str=13 , _UpperCamelCase : str=32 , _UpperCamelCase : Tuple=3 , _UpperCamelCase : List[Any]=4 , _UpperCamelCase : List[Any]=[10, 20, 30, 40] , _UpperCamelCase : int=[2, 2, 3, 2] , _UpperCamelCase : Tuple=True , _UpperCamelCase : List[str]=True , _UpperCamelCase : Dict=37 , _UpperCamelCase : int="gelu" , _UpperCamelCase : Optional[Any]=10 , _UpperCamelCase : Dict=0.0_2 , _UpperCamelCase : List[Any]=["stage2", "stage3", "stage4"] , _UpperCamelCase : Union[str, Any]=[2, 3, 4] , _UpperCamelCase : Optional[int]=None , ) ->Optional[int]: """simple docstring""" _lowerCamelCase : Optional[int] = parent _lowerCamelCase : Union[str, Any] = batch_size _lowerCamelCase : Union[str, Any] = image_size _lowerCamelCase : Optional[Any] = num_channels _lowerCamelCase : Optional[Any] = num_stages _lowerCamelCase : int = hidden_sizes _lowerCamelCase : Tuple = depths _lowerCamelCase : List[str] = is_training _lowerCamelCase : Tuple = use_labels _lowerCamelCase : Tuple = intermediate_size _lowerCamelCase : List[Any] = hidden_act _lowerCamelCase : str = num_labels _lowerCamelCase : List[str] = initializer_range _lowerCamelCase : List[str] = out_features _lowerCamelCase : Optional[int] = out_indices _lowerCamelCase : List[Any] = scope def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _lowerCamelCase : Dict = None if self.use_labels: _lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_labels) _lowerCamelCase : List[str] = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]: """simple docstring""" return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any]) ->List[Any]: """simple docstring""" _lowerCamelCase : Dict = ConvNextModel(config=_UpperCamelCase) model.to(_UpperCamelCase) model.eval() _lowerCamelCase : str = model(_UpperCamelCase) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _SCREAMING_SNAKE_CASE ( 
self : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : Any) ->int: """simple docstring""" _lowerCamelCase : Optional[Any] = ConvNextForImageClassification(_UpperCamelCase) model.to(_UpperCamelCase) model.eval() _lowerCamelCase : str = model(_UpperCamelCase , labels=_UpperCamelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict) ->Tuple: """simple docstring""" _lowerCamelCase : Optional[int] = ConvNextBackbone(config=_UpperCamelCase) model.to(_UpperCamelCase) model.eval() _lowerCamelCase : Optional[Any] = model(_UpperCamelCase) # verify hidden states self.parent.assertEqual(len(result.feature_maps) , len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4]) # verify channels self.parent.assertEqual(len(model.channels) , len(config.out_features)) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:]) # verify backbone works with out_features=None _lowerCamelCase : Optional[Any] = None _lowerCamelCase : List[Any] = ConvNextBackbone(config=_UpperCamelCase) model.to(_UpperCamelCase) model.eval() _lowerCamelCase : str = model(_UpperCamelCase) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , 1) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1]) # verify channels self.parent.assertEqual(len(model.channels) , 1) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]]) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]: """simple docstring""" _lowerCamelCase : int = self.prepare_config_and_inputs() _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = config_and_inputs _lowerCamelCase : Union[str, Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _snake_case = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) _snake_case = ( {'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification} if is_torch_available() else {} ) _snake_case = True _snake_case = False _snake_case = False _snake_case = False _snake_case = False def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[int]: """simple docstring""" _lowerCamelCase : List[Any] = ConvNextModelTester(self) _lowerCamelCase : List[str] = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->str: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]: """simple docstring""" return @unittest.skip(reason="""ConvNext does not use inputs_embeds""") def _SCREAMING_SNAKE_CASE ( self : str) ->List[Any]: """simple docstring""" pass @unittest.skip(reason="""ConvNext does not support input and output 
embeddings""") def _SCREAMING_SNAKE_CASE ( self : int) ->int: """simple docstring""" pass @unittest.skip(reason="""ConvNext does not use feedforward chunking""") def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple: """simple docstring""" pass def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple: """simple docstring""" _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : str = model_class(_UpperCamelCase) _lowerCamelCase : Tuple = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : Any = [*signature.parameters.keys()] _lowerCamelCase : Any = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[int]: """simple docstring""" _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]: """simple docstring""" _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Dict) ->Tuple: """simple docstring""" def check_hidden_states_output(_UpperCamelCase : int , _UpperCamelCase : Any , _UpperCamelCase : str): _lowerCamelCase : Union[str, Any] = model_class(_UpperCamelCase) model.to(_UpperCamelCase) model.eval() with torch.no_grad(): _lowerCamelCase : Dict = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase)) _lowerCamelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _lowerCamelCase : Optional[Any] = self.model_tester.num_stages self.assertEqual(len(_UpperCamelCase) , expected_num_stages + 1) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _lowerCamelCase , _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Union[str, Any] = True check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCamelCase : Any = True check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]: """simple docstring""" _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase) @slow def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : int = ConvNextModel.from_pretrained(_UpperCamelCase) self.assertIsNotNone(_UpperCamelCase) def A__ ( ): '''simple docstring''' _lowerCamelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class __snake_case ( unittest.TestCase ): '''simple docstring''' @cached_property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Any: """simple docstring""" return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""") if is_vision_available() else None @slow def _SCREAMING_SNAKE_CASE ( self : 
int) ->Dict: """simple docstring""" _lowerCamelCase : List[str] = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""").to(_UpperCamelCase) _lowerCamelCase : Optional[Any] = self.default_image_processor _lowerCamelCase : str = prepare_img() _lowerCamelCase : Optional[Any] = image_processor(images=_UpperCamelCase , return_tensors="""pt""").to(_UpperCamelCase) # forward pass with torch.no_grad(): _lowerCamelCase : List[Any] = model(**_UpperCamelCase) # verify the logits _lowerCamelCase : str = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape , _UpperCamelCase) _lowerCamelCase : Any = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1]).to(_UpperCamelCase) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1E-4)) @require_torch class __snake_case ( unittest.TestCase , __lowerCAmelCase ): '''simple docstring''' _snake_case = (ConvNextBackbone,) if is_torch_available() else () _snake_case = ConvNextConfig _snake_case = False def _SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : Tuple = ConvNextModelTester(self)
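# Editor's addition: the backbone API exercised by the backbone checks above,
# used standalone. The checkpoint name and the stage selection are the usual
# public ones, assumed here rather than taken from this file.
import torch
from transformers import ConvNextBackbone

backbone = ConvNextBackbone.from_pretrained(
    "facebook/convnext-tiny-224", out_features=["stage1", "stage2", "stage3", "stage4"]
)
pixel_values = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    outputs = backbone(pixel_values)
for name, feature_map in zip(backbone.out_features, outputs.feature_maps):
    print(name, tuple(feature_map.shape))  # strides 4, 8, 16, 32 vs. the input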
15
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class __snake_case ( __lowerCAmelCase ):
    '''simple docstring'''

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : float) ->float:
        """simple docstring"""
        return 0.0


def A__ ( __A , __A ):
    '''simple docstring'''
    _lowerCamelCase : int = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    _lowerCamelCase : Tuple = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest


def A__ ( __A , __A ):
    '''simple docstring'''
    _lowerCamelCase : Tuple = 512
    _lowerCamelCase : Tuple = [1] + [0] * (size - 1)
    _lowerCamelCase : Optional[Any] = [filter_type.process(__A ) for item in inputs]
    _lowerCamelCase : Optional[Any] = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    _lowerCamelCase : Tuple = np.abs(np.fft.fft(__A ) )
    _lowerCamelCase : List[Any] = 20 * np.logaa(__A )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel("""Frequency (Hz)""" )
    plt.xscale("""log""" )
    # Display within reasonable bounds
    _lowerCamelCase : Any = get_bounds(__A , __A )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel("""Gain (dB)""" )
    plt.plot(__A )
    plt.show()


def A__ ( __A , __A ):
    '''simple docstring'''
    _lowerCamelCase : Tuple = 512
    _lowerCamelCase : Union[str, Any] = [1] + [0] * (size - 1)
    _lowerCamelCase : int = [filter_type.process(__A ) for item in inputs]
    _lowerCamelCase : Optional[Any] = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    _lowerCamelCase : Any = np.angle(np.fft.fft(__A ) )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel("""Frequency (Hz)""" )
    plt.xscale("""log""" )
    plt.ylim(-2 * pi , 2 * pi )
    plt.ylabel("""Phase shift (Radians)""" )
    plt.plot(np.unwrap(__A , -2 * pi ) )
    plt.show()
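# Editor's addition: a concrete filter satisfying the `process` protocol that
# the (name-obfuscated) plotting helpers above expect. The one-pole low-pass
# is my own choice here, not part of the original module.
from math import exp

class OnePoleLowpass:
    def __init__(self, cutoff: float, samplerate: int) -> None:
        self.a = exp(-2 * pi * cutoff / samplerate)  # pole location
        self.y = 0.0

    def process(self, sample: float) -> float:
        # y[n] = (1 - a) * x[n] + a * y[n - 1]
        self.y = (1 - self.a) * sample + self.a * self.y
        return self.y

# Feeding it to the gain and phase plotters above (both obfuscated to `A__`):
# A__(OnePoleLowpass(1000, 48000), 48000)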
15
1
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def A__ ( __A ): '''simple docstring''' _lowerCamelCase : List[str] = 384 if "tiny" in model_name: _lowerCamelCase : Optional[Any] = [3, 3, 9, 3] _lowerCamelCase : Optional[int] = [96, 192, 384, 768] if "small" in model_name: _lowerCamelCase : List[Any] = [3, 3, 27, 3] _lowerCamelCase : Optional[int] = [96, 192, 384, 768] if "base" in model_name: _lowerCamelCase : int = [3, 3, 27, 3] _lowerCamelCase : Optional[int] = [128, 256, 512, 1_024] _lowerCamelCase : List[Any] = 512 if "large" in model_name: _lowerCamelCase : List[Any] = [3, 3, 27, 3] _lowerCamelCase : int = [192, 384, 768, 1_536] _lowerCamelCase : int = 768 if "xlarge" in model_name: _lowerCamelCase : List[Any] = [3, 3, 27, 3] _lowerCamelCase : Dict = [256, 512, 1_024, 2_048] _lowerCamelCase : Optional[Any] = 1_024 # set label information _lowerCamelCase : Dict = 150 _lowerCamelCase : int = """huggingface/label-files""" _lowerCamelCase : List[Any] = """ade20k-id2label.json""" _lowerCamelCase : Optional[Any] = json.load(open(hf_hub_download(__A , __A , repo_type="""dataset""" ) , """r""" ) ) _lowerCamelCase : Optional[int] = {int(__A ): v for k, v in idalabel.items()} _lowerCamelCase : Optional[int] = {v: k for k, v in idalabel.items()} _lowerCamelCase : str = ConvNextConfig( depths=__A , hidden_sizes=__A , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) _lowerCamelCase : int = UperNetConfig( backbone_config=__A , auxiliary_in_channels=__A , num_labels=__A , idalabel=__A , labelaid=__A , ) return config def A__ ( __A ): '''simple docstring''' _lowerCamelCase : Dict = [] # fmt: off # stem rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") ) rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") ) rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") ) rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F"""backbone.stages.{i}.{j}.gamma""", F"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") ) rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") ) rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") ) rename_keys.append((F"""backbone.stages.{i}.{j}.norm.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") ) rename_keys.append((F"""backbone.stages.{i}.{j}.norm.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") ) rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") ) rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") ) rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") ) rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", 
F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") ) if i > 0: rename_keys.append((F"""backbone.downsample_layers.{i}.0.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") ) rename_keys.append((F"""backbone.downsample_layers.{i}.0.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") ) rename_keys.append((F"""backbone.downsample_layers.{i}.1.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") ) rename_keys.append((F"""backbone.downsample_layers.{i}.1.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") ) rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") ) rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") ) # decode head rename_keys.extend( [ ("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""), ("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""), ("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""), ("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""), ] ) # fmt: on return rename_keys def A__ ( __A , __A , __A ): '''simple docstring''' _lowerCamelCase : Any = dct.pop(__A ) _lowerCamelCase : Optional[int] = val def A__ ( __A , __A , __A ): '''simple docstring''' _lowerCamelCase : Any = { """upernet-convnext-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth""", """upernet-convnext-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth""", """upernet-convnext-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth""", """upernet-convnext-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth""", """upernet-convnext-xlarge""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth""", } _lowerCamelCase : Tuple = model_name_to_url[model_name] _lowerCamelCase : Optional[int] = torch.hub.load_state_dict_from_url(__A , map_location="""cpu""" )["""state_dict"""] _lowerCamelCase : Optional[int] = get_upernet_config(__A ) _lowerCamelCase : Any = UperNetForSemanticSegmentation(__A ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): _lowerCamelCase : Optional[Any] = state_dict.pop(__A ) if "bn" in key: _lowerCamelCase : str = key.replace("""bn""" , """batch_norm""" ) _lowerCamelCase : str = val # rename keys _lowerCamelCase : str = create_rename_keys(__A ) for src, dest in rename_keys: rename_key(__A , __A , __A ) model.load_state_dict(__A ) # verify on image _lowerCamelCase : Union[str, Any] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg""" _lowerCamelCase : Any = Image.open(requests.get(__A , stream=__A ).raw ).convert("""RGB""" ) _lowerCamelCase : Optional[int] = SegformerImageProcessor() _lowerCamelCase : List[str] = processor(__A , 
return_tensors="""pt""" ).pixel_values with torch.no_grad(): _lowerCamelCase : Optional[Any] = model(__A ) if model_name == "upernet-convnext-tiny": _lowerCamelCase : Optional[int] = torch.tensor( [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ) elif model_name == "upernet-convnext-small": _lowerCamelCase : Any = torch.tensor( [[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]] ) elif model_name == "upernet-convnext-base": _lowerCamelCase : Dict = torch.tensor( [[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]] ) elif model_name == "upernet-convnext-large": _lowerCamelCase : Optional[int] = torch.tensor( [[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]] ) elif model_name == "upernet-convnext-xlarge": _lowerCamelCase : Tuple = torch.tensor( [[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]] ) print("""Logits:""" , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , __A , atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__A ) print(F"""Saving processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(__A ) if push_to_hub: print(F"""Pushing model and processor for {model_name} to hub""" ) model.push_to_hub(F"""openmmlab/{model_name}""" ) processor.push_to_hub(F"""openmmlab/{model_name}""" ) if __name__ == "__main__": lowerCAmelCase : str =argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="upernet-convnext-tiny", type=str, choices=[F"""upernet-convnext-{size}""" for size in ["tiny", "small", "base", "large", "xlarge"]], help="Name of the ConvNext UperNet model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) lowerCAmelCase : Tuple =parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
15
import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL lowerCAmelCase : Tuple =version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11") def A__ ( __A , __A , __A , __A , __A , __A , __A , __A=False , ): '''simple docstring''' output_path.parent.mkdir(parents=__A , exist_ok=__A ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( __A , __A , f=output_path.as_posix() , input_names=__A , output_names=__A , dynamic_axes=__A , do_constant_folding=__A , use_external_data_format=__A , enable_onnx_checker=__A , opset_version=__A , ) else: export( __A , __A , f=output_path.as_posix() , input_names=__A , output_names=__A , dynamic_axes=__A , do_constant_folding=__A , opset_version=__A , ) @torch.no_grad() def A__ ( __A , __A , __A , __A = False ): '''simple docstring''' _lowerCamelCase : Tuple = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): _lowerCamelCase : str = """cuda""" elif fpaa and not torch.cuda.is_available(): raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" ) else: _lowerCamelCase : List[str] = """cpu""" _lowerCamelCase : Dict = Path(__A ) # VAE DECODER _lowerCamelCase : Optional[Any] = AutoencoderKL.from_pretrained(model_path + """/vae""" ) _lowerCamelCase : List[str] = vae_decoder.config.latent_channels # forward only through the decoder part _lowerCamelCase : Tuple = vae_decoder.decode onnx_export( __A , model_args=( torch.randn(1 , __A , 25 , 25 ).to(device=__A , dtype=__A ), False, ) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={ """latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""}, } , opset=__A , ) del vae_decoder if __name__ == "__main__": lowerCAmelCase : Optional[int] =argparse.ArgumentParser() parser.add_argument( "--model_path", type=str, required=True, help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", ) parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") parser.add_argument( "--opset", default=14, type=int, help="The version of the ONNX operator set to use.", ) parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") lowerCAmelCase : Optional[Any] =parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print("SD: Done: ONNX")
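# Editor's addition: running the exported decoder with onnxruntime. The output
# directory is a placeholder, and this assumes the boolean `return_dict` input
# was constant-folded out of the graph during export, as is typical.
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("sd_onnx/vae_decoder/model.onnx")
latent = np.random.randn(1, 4, 25, 25).astype(np.float32)  # 4 latent channels for SD VAEs
(sample,) = session.run(None, {"latent_sample": latent})
print(sample.shape)  # (1, 3, 200, 200): the decoder upsamples 8x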
15
1
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = ['image_processor', 'tokenizer'] _snake_case = 'OwlViTImageProcessor' _snake_case = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self : str , _UpperCamelCase : Dict=None , _UpperCamelCase : Any=None , **_UpperCamelCase : List[Any]) ->Any: """simple docstring""" _lowerCamelCase : Dict = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , _UpperCamelCase , ) _lowerCamelCase : Optional[int] = kwargs.pop("""feature_extractor""") _lowerCamelCase : List[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""") if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""") super().__init__(_UpperCamelCase , _UpperCamelCase) def __call__( self : int , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Dict=None , _UpperCamelCase : Dict="max_length" , _UpperCamelCase : List[str]="np" , **_UpperCamelCase : Optional[Any]) ->List[Any]: """simple docstring""" if text is None and query_images is None and images is None: raise ValueError( """You have to specify at least one text or query image or image. All three cannot be none.""") if text is not None: if isinstance(_UpperCamelCase , _UpperCamelCase) or (isinstance(_UpperCamelCase , _UpperCamelCase) and not isinstance(text[0] , _UpperCamelCase)): _lowerCamelCase : int = [self.tokenizer(_UpperCamelCase , padding=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase)] elif isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(text[0] , _UpperCamelCase): _lowerCamelCase : Union[str, Any] = [] # Maximum number of queries across batch _lowerCamelCase : List[str] = max([len(_UpperCamelCase) for t in text]) # Pad all batch samples to max number of text queries for t in text: if len(_UpperCamelCase) != max_num_queries: _lowerCamelCase : Any = t + [""" """] * (max_num_queries - len(_UpperCamelCase)) _lowerCamelCase : int = self.tokenizer(_UpperCamelCase , padding=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase) encodings.append(_UpperCamelCase) else: raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""") if return_tensors == "np": _lowerCamelCase : List[str] = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0) _lowerCamelCase : Union[str, Any] = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp _lowerCamelCase : str = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0) _lowerCamelCase : Optional[Any] = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0) elif return_tensors == "pt" and is_torch_available(): import torch _lowerCamelCase : str = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0) _lowerCamelCase : List[Any] = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0) elif return_tensors == "tf" and is_tf_available(): 
import tensorflow as tf _lowerCamelCase : List[str] = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0) _lowerCamelCase : List[str] = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0) else: raise ValueError("""Target return tensor type could not be returned""") _lowerCamelCase : Any = BatchEncoding() _lowerCamelCase : str = input_ids _lowerCamelCase : Union[str, Any] = attention_mask if query_images is not None: _lowerCamelCase : Optional[Any] = BatchEncoding() _lowerCamelCase : int = self.image_processor( _UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase).pixel_values _lowerCamelCase : Tuple = query_pixel_values if images is not None: _lowerCamelCase : Any = self.image_processor(_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase) if text is not None and images is not None: _lowerCamelCase : List[str] = image_features.pixel_values return encoding elif query_images is not None and images is not None: _lowerCamelCase : List[str] = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**_UpperCamelCase) , tensor_type=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , *_UpperCamelCase : int , **_UpperCamelCase : int) ->Tuple: """simple docstring""" return self.image_processor.post_process(*_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Tuple , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : List[Any]) ->Union[str, Any]: """simple docstring""" return self.image_processor.post_process_object_detection(*_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : int , *_UpperCamelCase : Dict , **_UpperCamelCase : str) ->int: """simple docstring""" return self.image_processor.post_process_image_guided_detection(*_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , *_UpperCamelCase : Any , **_UpperCamelCase : str) ->List[Any]: """simple docstring""" return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any , *_UpperCamelCase : Tuple , **_UpperCamelCase : List[Any]) ->Any: """simple docstring""" return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase) @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]: """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _UpperCamelCase , ) return self.image_processor_class @property def _SCREAMING_SNAKE_CASE ( self : Dict) ->List[Any]: """simple docstring""" warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , _UpperCamelCase , ) return self.image_processor
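# Editor's addition: the zero-shot detection flow this processor supports,
# using the standard public checkpoint (an assumption; the file itself names
# no checkpoint). The image path is a placeholder.
import torch
from PIL import Image
from transformers import OwlViTForObjectDetection, OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")

image = Image.open("cats.png").convert("RGB")  # placeholder path
texts = [["a photo of a cat", "a photo of a dog"]]
inputs = processor(text=texts, images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
results = processor.post_process_object_detection(outputs, threshold=0.1, target_sizes=target_sizes)
for score, label, box in zip(results[0]["scores"], results[0]["labels"], results[0]["boxes"]):
    print(texts[0][label], round(score.item(), 3), [round(c, 1) for c in box.tolist()])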
15
from math import log

from scipy.constants import Boltzmann, physical_constants

lowerCAmelCase : List[Any] =300  # TEMPERATURE (unit = K)


def A__ ( __A , __A , __A , ):
    '''simple docstring'''
    if donor_conc <= 0:
        raise ValueError("""Donor concentration should be positive""" )
    elif acceptor_conc <= 0:
        raise ValueError("""Acceptor concentration should be positive""" )
    elif intrinsic_conc <= 0:
        raise ValueError("""Intrinsic concentration should be positive""" )
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            """Donor concentration should be greater than intrinsic concentration""" )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            """Acceptor concentration should be greater than intrinsic concentration""" )
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
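# Editor's addition: a worked example of the built-in voltage formula above for
# silicon at 300 K with Nd = Na = 1e17 cm^-3 and n_i = 1.5e10 cm^-3 (textbook
# values, assumed here).
kT_in_eV = Boltzmann * 300 / physical_constants["electron volt"][0]  # ~0.0259 eV
v_bi = kT_in_eV * log((1e17 * 1e17) / 1.5e10**2)
print(f"built-in voltage ~ {v_bi:.3f} V")  # ~0.812 V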
15
1
lowerCAmelCase : Optional[int] =[0, 2, 4, 6, 8]
lowerCAmelCase : Tuple =[1, 3, 5, 7, 9]


def A__ ( __A , __A , __A , __A ):
    '''simple docstring'''
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1 , -1 , -1 ):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        _lowerCamelCase : Union[str, Any] = 0
        for digit in range(10 ):
            _lowerCamelCase : Optional[int] = digit
            result += reversible_numbers(
                0 , (remainder + 2 * digit) // 10 , __A , __A )
        return result
    _lowerCamelCase : Optional[Any] = 0
    for digita in range(10 ):
        _lowerCamelCase : Tuple = digita
        if (remainder + digita) % 2 == 0:
            _lowerCamelCase : Tuple = ODD_DIGITS
        else:
            _lowerCamelCase : Optional[int] = EVEN_DIGITS
        for digita in other_parity_digits:
            _lowerCamelCase : Tuple = digita
            result += reversible_numbers(
                remaining_length - 2 , (remainder + digita + digita) // 10 , __A , __A , )
    return result


def A__ ( __A = 9 ):
    '''simple docstring'''
    _lowerCamelCase : List[str] = 0
    for length in range(1 , max_power + 1 ):
        result += reversible_numbers(__A , 0 , [0] * length , __A )
    return result


if __name__ == "__main__":
    print(F"""{solution() = }""")
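# Editor's addition: a brute-force cross-check of the digit-DP solution above,
# straight from the Project Euler 145 definition (numbers ending in 0 are
# skipped because reverse(n) would have a leading zero).
def count_reversible_brute(limit: int) -> int:
    count = 0
    for n in range(1, limit):
        if n % 10 == 0:
            continue
        total = n + int(str(n)[::-1])
        if all(int(d) % 2 == 1 for d in str(total)):
            count += 1
    return count

assert count_reversible_brute(1_000) == 120  # the count stated in the problem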
15
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def A__ ( __A ):
    '''simple docstring'''
    _lowerCamelCase : Tuple = {}
    _lowerCamelCase : List[Any] = tokenizer(example["""content"""] , truncation=__A )["""input_ids"""]
    _lowerCamelCase : Tuple = len(example["""content"""] ) / len(output["""input_ids"""] )
    return output


lowerCAmelCase : int =HfArgumentParser(PretokenizationArguments)
lowerCAmelCase : int =parser.parse_args()
if args.num_workers is None:
    lowerCAmelCase : Any =multiprocessing.cpu_count()
lowerCAmelCase : Optional[Any] =AutoTokenizer.from_pretrained(args.tokenizer_dir)

lowerCAmelCase : str =time.time()
lowerCAmelCase : Union[str, Any] =load_dataset(args.dataset_name, split="train")
print(F"""Dataset loaded in {time.time()-t_start:.2f}s""")

lowerCAmelCase : Dict =time.time()
lowerCAmelCase : Dict =ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")

lowerCAmelCase : Tuple =time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
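# Editor's addition: a small sanity check of the chars-per-token ratio the map
# function above records. The gpt2 tokenizer is an arbitrary stand-in for
# args.tokenizer_dir.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
example = {"content": "def add(a, b):\n    return a + b\n"}
ids = tok(example["content"], truncation=True)["input_ids"]
print(len(example["content"]) / len(ids))  # characters per token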
15
1
def A__ ( __A ):
    '''simple docstring'''
    _lowerCamelCase : Optional[Any] = [0] * len(__A )
    _lowerCamelCase : int = []
    _lowerCamelCase : List[Any] = [1] * len(__A )

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(__A ) ):
        if indegree[i] == 0:
            queue.append(__A )

    while queue:
        _lowerCamelCase : str = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                _lowerCamelCase : int = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(__A )

    print(max(__A ) )


# Adjacency list of Graph
lowerCAmelCase : Optional[Any] ={0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
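# Editor's addition: a de-obfuscated cross-check of the Kahn-style longest-path
# routine above; for the adjacency list given there the longest chain is
# 0 -> 2 -> 5 -> 6 -> 7, i.e. five vertices, so the call prints 5.
from collections import deque

def longest_path_length(graph: dict[int, list[int]]) -> int:
    indegree = {v: 0 for v in graph}
    for targets in graph.values():
        for t in targets:
            indegree[t] += 1
    dist = {v: 1 for v in graph}
    queue = deque(v for v, d in indegree.items() if d == 0)
    while queue:
        v = queue.popleft()
        for t in graph[v]:
            indegree[t] -= 1
            dist[t] = max(dist[t], dist[v] + 1)
            if indegree[t] == 0:
                queue.append(t)
    return max(dist.values())

assert longest_path_length({0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}) == 5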
15
import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _snake_case = IFPipeline _snake_case = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'} _snake_case = TEXT_TO_IMAGE_BATCH_PARAMS _snake_case = PipelineTesterMixin.required_optional_params - {'latents'} def _SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]: """simple docstring""" return self._get_dummy_components() def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=0) ->Optional[Any]: """simple docstring""" if str(_UpperCamelCase).startswith("""mps"""): _lowerCamelCase : int = torch.manual_seed(_UpperCamelCase) else: _lowerCamelCase : List[Any] = torch.Generator(device=_UpperCamelCase).manual_seed(_UpperCamelCase) _lowerCamelCase : Dict = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""") def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1) def _SCREAMING_SNAKE_CASE ( self : int) ->Any: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2) def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]: """simple docstring""" self._test_save_load_local() def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3) @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]: """simple docstring""" _lowerCamelCase : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa) _lowerCamelCase : Tuple = IFSuperResolutionPipeline.from_pretrained( """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("""cuda""") _lowerCamelCase , _lowerCamelCase : str = 
pipe_a.encode_prompt("""anime turtle""" , device="""cuda""") del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() _lowerCamelCase : str = None _lowerCamelCase : str = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img _lowerCamelCase : Optional[Any] = IFImgaImgPipeline(**pipe_a.components) _lowerCamelCase : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if_imgaimg(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting _lowerCamelCase : Any = IFInpaintingPipeline(**pipe_a.components) _lowerCamelCase : Dict = IFInpaintingSuperResolutionPipeline(**pipe_a.components) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if_inpainting(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str) ->Tuple: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : Optional[int] = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Optional[Any] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Optional[int] = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 _lowerCamelCase : Dict = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : str = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : Any = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : str , _UpperCamelCase : List[Any]) ->Any: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Union[str, Any] = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Dict 
= pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Union[str, Any] = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _lowerCamelCase : List[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : List[str] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Optional[Any] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : List[Any] = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : str = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple) ->Optional[int]: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(1)).to(_UpperCamelCase) _lowerCamelCase : int = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Any = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Any = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _lowerCamelCase : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1)).to(_UpperCamelCase) _lowerCamelCase : List[str] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : Optional[Any] = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : Optional[Any] = 
torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def A__ ( ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
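# --- Illustrative sketch (added; assumes a CUDA device, helper name is hypothetical) ---
# What _start_torch_memory_measurement enables: after resetting the CUDA memory
# statistics, torch.cuda.max_memory_allocated() reports only the peak of the
# code run since the reset, which is what the mem_bytes assertions above check.
import torch

def _measure_peak_bytes(fn):
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
    fn()
    return torch.cuda.max_memory_allocated()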
15
1
import argparse
import logging
from collections import namedtuple

import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer  # The authors' implementation

from transformers import BertTokenizer


logging.basicConfig(level=logging.INFO)
lowerCAmelCase : Optional[Any] =logging.getLogger(__name__)

lowerCAmelCase : Union[str, Any] ="Hello world! cécé herlolip"

lowerCAmelCase : str =namedtuple(
    "BertAbsConfig",
    [
        "temp_dir",
        "large",
        "use_bert_emb",
        "finetune_bert",
        "encoder",
        "share_emb",
        "max_pos",
        "enc_layers",
        "enc_hidden_size",
        "enc_heads",
        "enc_ff_size",
        "enc_dropout",
        "dec_layers",
        "dec_hidden_size",
        "dec_heads",
        "dec_ff_size",
        "dec_dropout",
    ],
)


def A__ ( __A , __A ):
    '''simple docstring'''
    _lowerCamelCase : int = BertAbsConfig(
        temp_dir=""".""" ,
        finetune_bert=__A ,
        large=__A ,
        share_emb=__A ,
        use_bert_emb=__A ,
        encoder="""bert""" ,
        max_pos=512 ,
        enc_layers=6 ,
        enc_hidden_size=512 ,
        enc_heads=8 ,
        enc_ff_size=512 ,
        enc_dropout=0.2 ,
        dec_layers=6 ,
        dec_hidden_size=768 ,
        dec_heads=8 ,
        dec_ff_size=2_048 ,
        dec_dropout=0.2 ,
    )
    _lowerCamelCase : Dict = torch.load(__A , lambda __A , __A : storage )
    _lowerCamelCase : List[Any] = AbsSummarizer(__A , torch.device("""cpu""" ) , __A )
    original.eval()

    _lowerCamelCase : Optional[Any] = BertAbsSummarizer(__A , torch.device("""cpu""" ) )
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("""convert the model""" )
    new_model.bert.load_state_dict(original.bert.state_dict() )
    new_model.decoder.load_state_dict(original.decoder.state_dict() )
    new_model.generator.load_state_dict(original.generator.state_dict() )

    # -----------------------------------
    # Make sure the outputs are identical
    # -----------------------------------

    logging.info("""Make sure that the models' outputs are identical""" )
    _lowerCamelCase : Dict = BertTokenizer.from_pretrained("""bert-base-uncased""" )

    # prepare the model inputs
    _lowerCamelCase : Union[str, Any] = tokenizer.encode("""This is sample éàalj'-.""" )
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__A )) )
    _lowerCamelCase : List[str] = torch.tensor(__A ).unsqueeze(0 )
    _lowerCamelCase : int = tokenizer.encode("""This is sample 3 éàalj'-.""" )
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__A )) )
    _lowerCamelCase : int = torch.tensor(__A ).unsqueeze(0 )

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0

    # forward pass
    _lowerCamelCase : Union[str, Any] = encoder_input_ids
    _lowerCamelCase : str = decoder_input_ids
    _lowerCamelCase : Union[str, Any] = None
    _lowerCamelCase : Union[str, Any] = None
    _lowerCamelCase : int = None
    _lowerCamelCase : Tuple = None
    _lowerCamelCase : Any = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    _lowerCamelCase : Optional[int] = original(__A , __A , __A , __A , __A , __A , __A )[0]
    _lowerCamelCase : Union[str, Any] = original.generator(__A )

    _lowerCamelCase : Dict = new_model(
        __A , __A , __A , __A , __A )[0]
    _lowerCamelCase : int = new_model.generator(__A )

    _lowerCamelCase : Optional[Any] = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print("""Maximum absolute difference between outputs: {:.2f}""".format(__A ) )
    _lowerCamelCase : Union[str, Any] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print("""Maximum absolute difference between outputs: {:.2f}""".format(__A ) )

    _lowerCamelCase : Any = torch.allclose(__A , __A , atol=1E-3 )
    if are_identical:
        logging.info("""all weights are equal up to 1e-3""" )
    else:
        raise ValueError("""the weights are different. The new model is likely different from the original one.""" )

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("""saving the model's state dictionary""" )
    torch.save(
        new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"""
    )


if __name__ == "__main__":
    lowerCAmelCase : str =argparse.ArgumentParser()
    parser.add_argument(
        "--bertabs_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path to the official PyTorch dump.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    lowerCAmelCase : Optional[int] =parser.parse_args()
    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
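# --- Illustrative sketch (added; helper name is hypothetical) ---
# The input-preparation pattern used above: pad a list of token ids with the
# tokenizer's pad id up to a fixed length of 512, then add a batch dimension.
import torch

def _pad_to_length(ids, pad_id, length=512):
    return torch.tensor(ids + [pad_id] * (length - len(ids))).unsqueeze(0)

# e.g. _pad_to_length([101, 2023, 102], pad_id=0).shape == (1, 512)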
15
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


lowerCAmelCase : Any =logging.get_logger(__name__)

lowerCAmelCase : List[Any] ={
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class __snake_case ( __lowerCAmelCase , __lowerCAmelCase ):
    '''simple docstring'''

    _snake_case = 'swin'

    _snake_case = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__( self : Optional[int] , _UpperCamelCase : List[str]=224 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : List[Any]=3 , _UpperCamelCase : Dict=96 , _UpperCamelCase : Any=[2, 2, 6, 2] , _UpperCamelCase : Any=[3, 6, 12, 24] , _UpperCamelCase : Tuple=7 , _UpperCamelCase : Tuple=4.0 , _UpperCamelCase : Dict=True , _UpperCamelCase : Tuple=0.0 , _UpperCamelCase : Any=0.0 , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : Any="gelu" , _UpperCamelCase : str=False , _UpperCamelCase : str=0.0_2 , _UpperCamelCase : Dict=1E-5 , _UpperCamelCase : List[str]=32 , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : List[Any]=None , **_UpperCamelCase : List[Any] , ) ->Tuple:
        """simple docstring"""
        super().__init__(**_UpperCamelCase)

        _lowerCamelCase : List[str] = image_size
        _lowerCamelCase : Tuple = patch_size
        _lowerCamelCase : Dict = num_channels
        _lowerCamelCase : Union[str, Any] = embed_dim
        _lowerCamelCase : str = depths
        _lowerCamelCase : str = len(_UpperCamelCase)
        _lowerCamelCase : Optional[Any] = num_heads
        _lowerCamelCase : Tuple = window_size
        _lowerCamelCase : int = mlp_ratio
        _lowerCamelCase : Optional[int] = qkv_bias
        _lowerCamelCase : List[str] = hidden_dropout_prob
        _lowerCamelCase : str = attention_probs_dropout_prob
        _lowerCamelCase : Tuple = drop_path_rate
        _lowerCamelCase : List[str] = hidden_act
        _lowerCamelCase : Dict = use_absolute_embeddings
        _lowerCamelCase : int = layer_norm_eps
        _lowerCamelCase : str = initializer_range
        _lowerCamelCase : Dict = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        _lowerCamelCase : int = int(embed_dim * 2 ** (len(_UpperCamelCase) - 1))
        _lowerCamelCase : Dict = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(_UpperCamelCase) + 1)]
        _lowerCamelCase , _lowerCamelCase : List[str] = get_aligned_output_features_output_indices(
            out_features=_UpperCamelCase , out_indices=_UpperCamelCase , stage_names=self.stage_names)


class __snake_case ( __lowerCAmelCase ):
    '''simple docstring'''

    _snake_case = version.parse('1.11' )

    @property
    def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ])

    @property
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->float:
        """simple docstring"""
        return 1E-4
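# --- Illustrative sketch (added; not part of the original file) ---
# The derived hidden_size above doubles the embedding dim once per stage after
# the first: hidden_size = embed_dim * 2 ** (num_layers - 1). For the defaults:
embed_dim, depths = 96, [2, 2, 6, 2]
assert int(embed_dim * 2 ** (len(depths) - 1)) == 768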
15
1
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


lowerCAmelCase : str ={
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase : Optional[Any] =[
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]


if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    lowerCAmelCase : Optional[int] =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
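# --- Illustrative sketch (added; simplified, hypothetical stand-in for _LazyModule) ---
# The pattern above defers heavy imports until an attribute is first accessed.
# Module-level __getattr__ (PEP 562) can emulate the same behaviour:
import importlib

def __getattr__(name):
    for module, names in _import_structure.items():
        if name in names:
            return getattr(importlib.import_module("." + module, __package__), name)
    raise AttributeError(name)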
15
import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = (EulerDiscreteScheduler,) _snake_case = 10 def _SCREAMING_SNAKE_CASE ( self : Tuple , **_UpperCamelCase : Optional[Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Optional[int] = { """num_train_timesteps""": 1100, """beta_start""": 0.0_0_0_1, """beta_end""": 0.0_2, """beta_schedule""": """linear""", } config.update(**_UpperCamelCase) return config def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict: """simple docstring""" for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]): self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : List[Any] = self.scheduler_classes[0] _lowerCamelCase : str = self.get_scheduler_config() _lowerCamelCase : Any = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps) _lowerCamelCase : str = torch.manual_seed(0) _lowerCamelCase : str = self.dummy_model() _lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma _lowerCamelCase : int = sample.to(_UpperCamelCase) for i, t in enumerate(scheduler.timesteps): _lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : str = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : Dict = output.prev_sample _lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : Any = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2 assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any: """simple docstring""" _lowerCamelCase : int = self.scheduler_classes[0] _lowerCamelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""") _lowerCamelCase : int = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps) _lowerCamelCase : Any = torch.manual_seed(0) _lowerCamelCase : int = self.dummy_model() _lowerCamelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma _lowerCamelCase : Dict = sample.to(_UpperCamelCase) for i, t in enumerate(scheduler.timesteps): _lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : str = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[Any] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : Tuple = output.prev_sample _lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase 
: Optional[int] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 0.0_0_0_2) < 1E-2 assert abs(result_mean.item() - 2.2_6_7_6E-0_6) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.scheduler_classes[0] _lowerCamelCase : int = self.get_scheduler_config() _lowerCamelCase : List[Any] = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase) _lowerCamelCase : Optional[Any] = torch.manual_seed(0) _lowerCamelCase : Tuple = self.dummy_model() _lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _lowerCamelCase : Tuple = sample.to(_UpperCamelCase) for t in scheduler.timesteps: _lowerCamelCase : List[Any] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Any = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : List[Any] = output.prev_sample _lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : List[Any] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2 assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple: """simple docstring""" _lowerCamelCase : List[str] = self.scheduler_classes[0] _lowerCamelCase : Optional[int] = self.get_scheduler_config() _lowerCamelCase : int = scheduler_class(**_UpperCamelCase , use_karras_sigmas=_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase) _lowerCamelCase : int = torch.manual_seed(0) _lowerCamelCase : Tuple = self.dummy_model() _lowerCamelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _lowerCamelCase : Optional[int] = sample.to(_UpperCamelCase) for t in scheduler.timesteps: _lowerCamelCase : Tuple = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Any = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : int = output.prev_sample _lowerCamelCase : Tuple = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : List[str] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9) < 1E-2 assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3) < 1E-3
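# --- Illustrative sketch (added; not part of the original file) ---
# The canonical Euler sampling loop the tests above exercise: scale the input,
# predict noise, then step. The zero-noise prediction is a stand-in for a UNet.
import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample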
15
1
import os


# Precomputes a list of the 100 first triangular numbers
lowerCAmelCase : Tuple =[int(0.5 * n * (n + 1)) for n in range(1, 101)]


def A__ ( ):
    '''simple docstring'''
    _lowerCamelCase : Optional[Any] = os.path.dirname(os.path.realpath(__A ) )
    _lowerCamelCase : Tuple = os.path.join(__A , """words.txt""" )
    _lowerCamelCase : int = """"""
    with open(__A ) as f:
        _lowerCamelCase : Union[str, Any] = f.readline()

    _lowerCamelCase : Tuple = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )]
    _lowerCamelCase : Optional[Any] = [
        word
        for word in [sum(ord(__A ) - 64 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(__A )


if __name__ == "__main__":
    print(solution())
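# --- Illustrative sketch (added; not part of the original file) ---
# A word's value is the sum of alphabetical positions (ord(c) - 64 for A..Z);
# "SKY" -> 19 + 11 + 25 = 55, the 10th triangular number, so it counts.
assert sum(ord(c) - 64 for c in "SKY") == 55
assert 55 in TRIANGULAR_NUMBERS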
15
import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase : Dict =logging.get_logger(__name__) lowerCAmelCase : Dict ={"vocab_file": "vocab.json"} lowerCAmelCase : List[str] ={ "vocab_file": { "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json", } } lowerCAmelCase : int ={"mgp-str": 27} class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = VOCAB_FILES_NAMES _snake_case = PRETRAINED_VOCAB_FILES_MAP _snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : int="[GO]" , _UpperCamelCase : Any="[GO]" , _UpperCamelCase : Optional[Any]="[s]" , _UpperCamelCase : List[str]="[GO]" , **_UpperCamelCase : Dict) ->Union[str, Any]: """simple docstring""" super().__init__( unk_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , pad_token=_UpperCamelCase , **_UpperCamelCase , ) with open(_UpperCamelCase , encoding="""utf-8""") as vocab_handle: _lowerCamelCase : Optional[Any] = json.load(_UpperCamelCase) _lowerCamelCase : Optional[Any] = {v: k for k, v in self.vocab.items()} @property def _SCREAMING_SNAKE_CASE ( self : str) ->Any: """simple docstring""" return len(self.vocab) def _SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]: """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Union[str, Any]) ->Any: """simple docstring""" _lowerCamelCase : Tuple = [] for s in text: char_tokens.extend(_UpperCamelCase) return char_tokens def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : int) ->Optional[int]: """simple docstring""" return self.vocab.get(_UpperCamelCase , self.vocab.get(self.unk_token)) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[Any]) ->Dict: """simple docstring""" return self.decoder.get(_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None) ->Tuple[str]: """simple docstring""" if not os.path.isdir(_UpperCamelCase): logger.error("""Vocabulary path ({}) should be a directory""".format(_UpperCamelCase)) return _lowerCamelCase : Tuple = os.path.join( _UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]) with open(_UpperCamelCase , """w""" , encoding="""utf-8""") as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=_UpperCamelCase , ensure_ascii=_UpperCamelCase) + """\n""") return (vocab_file,)
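# --- Illustrative sketch (added; not part of the original file) ---
# The tokenizer above is character-level: tokenization just splits the string
# into characters, each of which maps through the vocab, with unknown
# characters falling back to the unk token ("[GO]").
text = "ab1"
char_tokens = list(text)
assert char_tokens == ["a", "b", "1"]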
15
1
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpta import GPTaTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase : List[Any] =logging.get_logger(__name__) lowerCAmelCase : str ={"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} lowerCAmelCase : Optional[int] ={ "vocab_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json", }, "merges_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt", }, "tokenizer_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json", }, } lowerCAmelCase : Optional[Any] ={ "gpt2": 1024, "gpt2-medium": 1024, "gpt2-large": 1024, "gpt2-xl": 1024, "distilgpt2": 1024, } class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = VOCAB_FILES_NAMES _snake_case = PRETRAINED_VOCAB_FILES_MAP _snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case = ['input_ids', 'attention_mask'] _snake_case = GPTaTokenizer def __init__( self : Any , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : int=None , _UpperCamelCase : Tuple="<|endoftext|>" , _UpperCamelCase : Optional[Any]="<|endoftext|>" , _UpperCamelCase : List[str]="<|endoftext|>" , _UpperCamelCase : Optional[int]=False , **_UpperCamelCase : Dict , ) ->List[str]: """simple docstring""" super().__init__( _UpperCamelCase , _UpperCamelCase , tokenizer_file=_UpperCamelCase , unk_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , add_prefix_space=_UpperCamelCase , **_UpperCamelCase , ) _lowerCamelCase : Optional[Any] = kwargs.pop("""add_bos_token""" , _UpperCamelCase) _lowerCamelCase : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get("""add_prefix_space""" , _UpperCamelCase) != add_prefix_space: _lowerCamelCase : Optional[int] = getattr(_UpperCamelCase , pre_tok_state.pop("""type""")) _lowerCamelCase : Dict = add_prefix_space _lowerCamelCase : List[Any] = pre_tok_class(**_UpperCamelCase) _lowerCamelCase : Optional[Any] = add_prefix_space def _SCREAMING_SNAKE_CASE ( self : List[Any] , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : Optional[int]) ->BatchEncoding: """simple docstring""" _lowerCamelCase : int = kwargs.get("""is_split_into_words""" , _UpperCamelCase) assert self.add_prefix_space or not is_split_into_words, ( 
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Dict , *_UpperCamelCase : Dict , **_UpperCamelCase : Dict) ->BatchEncoding: """simple docstring""" _lowerCamelCase : List[Any] = kwargs.get("""is_split_into_words""" , _UpperCamelCase) assert self.add_prefix_space or not is_split_into_words, ( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None) ->Tuple[str]: """simple docstring""" _lowerCamelCase : Any = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase) return tuple(_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : "Conversation") ->List[int]: """simple docstring""" _lowerCamelCase : List[str] = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase) + [self.eos_token_id]) if len(_UpperCamelCase) > self.model_max_length: _lowerCamelCase : str = input_ids[-self.model_max_length :] return input_ids
15
import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Tuple = ["""a""", """b""", """c"""] # Defaults to last layer if both are None _lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) self.assertEqual(_UpperCamelCase , ["""c"""]) self.assertEqual(_UpperCamelCase , [2]) # Out indices set to match out features _lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(["""a""", """c"""] , _UpperCamelCase , _UpperCamelCase) self.assertEqual(_UpperCamelCase , ["""a""", """c"""]) self.assertEqual(_UpperCamelCase , [0, 2]) # Out features set to match out indices _lowerCamelCase , _lowerCamelCase : Tuple = get_aligned_output_features_output_indices(_UpperCamelCase , [0, 2] , _UpperCamelCase) self.assertEqual(_UpperCamelCase , ["""a""", """c"""]) self.assertEqual(_UpperCamelCase , [0, 2]) # Out features selected from negative indices _lowerCamelCase , _lowerCamelCase : str = get_aligned_output_features_output_indices(_UpperCamelCase , [-3, -1] , _UpperCamelCase) self.assertEqual(_UpperCamelCase , ["""a""", """c"""]) self.assertEqual(_UpperCamelCase , [-3, -1]) def _SCREAMING_SNAKE_CASE ( self : int) ->int: """simple docstring""" with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , _UpperCamelCase) # Out features must be a list with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""]) # Out features must be a subset of stage names with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""]) # Out indices must be a list or tuple with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(_UpperCamelCase , 0 , ["""a""", """b"""]) # Out indices must be a subset of stage names with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(_UpperCamelCase , (0, 1) , ["""a"""]) # Out features and out indices must be the same length with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""]) # Out features should match out indices with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""]) # Out features and out indices should be in order with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""]) # Check passes with valid inputs verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""]) def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]: """simple docstring""" _lowerCamelCase : int = BackboneMixin() _lowerCamelCase : Union[str, Any] = ["""a""", """b""", """c"""] _lowerCamelCase : Tuple = ["""a""", """c"""] _lowerCamelCase : List[Any] = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ["""a""", """c"""]) self.assertEqual(backbone.out_indices , [0, 2]) # Check out features and indices are updated correctly _lowerCamelCase : str = ["""a""", """b"""] self.assertEqual(backbone.out_features , ["""a""", """b"""]) 
self.assertEqual(backbone.out_indices , [0, 1]) _lowerCamelCase : Optional[int] = [-3, -1] self.assertEqual(backbone.out_features , ["""a""", """c"""]) self.assertEqual(backbone.out_indices , [-3, -1])
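# --- Illustrative sketch (added; not part of the original file) ---
# The invariant the tests above verify: out_features and out_indices must
# select the same stages, in the same order. Deriving one from the other:
stage_names = ["a", "b", "c"]
out_indices = [0, 2]
out_features = [stage_names[i] for i in out_indices]
assert out_features == ["a", "c"]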
15
1
import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __snake_case : '''simple docstring''' def __init__( self : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Dict=13 , _UpperCamelCase : Optional[Any]=32 , _UpperCamelCase : int=2 , _UpperCamelCase : Optional[Any]=3 , _UpperCamelCase : Union[str, Any]=16 , _UpperCamelCase : List[str]=[32, 64, 128] , _UpperCamelCase : Any=[1, 2, 1] , _UpperCamelCase : Tuple=[2, 2, 4] , _UpperCamelCase : Union[str, Any]=2 , _UpperCamelCase : int=2.0 , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Tuple=0.0 , _UpperCamelCase : List[str]=0.0 , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : int="gelu" , _UpperCamelCase : Optional[int]=False , _UpperCamelCase : int=True , _UpperCamelCase : Any=0.0_2 , _UpperCamelCase : List[Any]=1E-5 , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Any=None , _UpperCamelCase : str=True , _UpperCamelCase : Dict=10 , _UpperCamelCase : Dict=8 , _UpperCamelCase : Any=["stage1", "stage2"] , _UpperCamelCase : Dict=[1, 2] , ) ->str: """simple docstring""" _lowerCamelCase : List[Any] = parent _lowerCamelCase : List[Any] = batch_size _lowerCamelCase : Union[str, Any] = image_size _lowerCamelCase : Optional[Any] = patch_size _lowerCamelCase : int = num_channels _lowerCamelCase : Any = embed_dim _lowerCamelCase : Union[str, Any] = hidden_sizes _lowerCamelCase : str = depths _lowerCamelCase : List[Any] = num_heads _lowerCamelCase : Union[str, Any] = window_size _lowerCamelCase : Optional[Any] = mlp_ratio _lowerCamelCase : List[str] = qkv_bias _lowerCamelCase : Any = hidden_dropout_prob _lowerCamelCase : Tuple = attention_probs_dropout_prob _lowerCamelCase : Optional[Any] = drop_path_rate _lowerCamelCase : Union[str, Any] = hidden_act _lowerCamelCase : str = use_absolute_embeddings _lowerCamelCase : List[str] = patch_norm _lowerCamelCase : Dict = layer_norm_eps _lowerCamelCase : int = initializer_range _lowerCamelCase : str = is_training _lowerCamelCase : Optional[int] = scope _lowerCamelCase : str = use_labels _lowerCamelCase : List[Any] = type_sequence_label_size _lowerCamelCase : Any = encoder_stride _lowerCamelCase : List[Any] = out_features _lowerCamelCase : str = out_indices def _SCREAMING_SNAKE_CASE ( self : str) ->str: """simple docstring""" _lowerCamelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _lowerCamelCase : str = None if self.use_labels: _lowerCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size) _lowerCamelCase : Tuple = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]: """simple docstring""" return 
FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : int) ->Dict: """simple docstring""" _lowerCamelCase : List[Any] = FocalNetModel(config=_UpperCamelCase) model.to(_UpperCamelCase) model.eval() _lowerCamelCase : Optional[Any] = model(_UpperCamelCase) _lowerCamelCase : int = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) _lowerCamelCase : int = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim)) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[str]) ->str: """simple docstring""" _lowerCamelCase : Dict = FocalNetBackbone(config=_UpperCamelCase) model.to(_UpperCamelCase) model.eval() _lowerCamelCase : Any = model(_UpperCamelCase) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size, 8, 8]) # verify channels self.parent.assertEqual(len(model.channels) , len(config.out_features)) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1]) # verify backbone works with out_features=None _lowerCamelCase : List[Any] = None _lowerCamelCase : int = FocalNetBackbone(config=_UpperCamelCase) model.to(_UpperCamelCase) model.eval() _lowerCamelCase : Tuple = model(_UpperCamelCase) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , 1) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size * 2, 4, 4]) # verify channels self.parent.assertEqual(len(model.channels) , 1) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]]) def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Tuple , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any]) ->str: """simple docstring""" _lowerCamelCase : Dict = FocalNetForMaskedImageModeling(config=_UpperCamelCase) model.to(_UpperCamelCase) model.eval() _lowerCamelCase : Tuple = model(_UpperCamelCase) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size)) # test greyscale images _lowerCamelCase : Tuple = 1 _lowerCamelCase : int = FocalNetForMaskedImageModeling(_UpperCamelCase) model.to(_UpperCamelCase) model.eval() _lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) _lowerCamelCase : List[Any] = model(_UpperCamelCase) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size)) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , 
_UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Dict) ->Dict: """simple docstring""" _lowerCamelCase : str = self.type_sequence_label_size _lowerCamelCase : Optional[int] = FocalNetForImageClassification(_UpperCamelCase) model.to(_UpperCamelCase) model.eval() _lowerCamelCase : Any = model(_UpperCamelCase , labels=_UpperCamelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images _lowerCamelCase : List[str] = 1 _lowerCamelCase : int = FocalNetForImageClassification(_UpperCamelCase) model.to(_UpperCamelCase) model.eval() _lowerCamelCase : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) _lowerCamelCase : List[Any] = model(_UpperCamelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def _SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]: """simple docstring""" _lowerCamelCase : Any = self.prepare_config_and_inputs() _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = config_and_inputs _lowerCamelCase : Optional[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _snake_case = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) _snake_case = ( {'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification} if is_torch_available() else {} ) _snake_case = False _snake_case = False _snake_case = False _snake_case = False _snake_case = False def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple: """simple docstring""" _lowerCamelCase : int = FocalNetModelTester(self) _lowerCamelCase : str = ConfigTester(self , config_class=_UpperCamelCase , embed_dim=37 , has_text_modality=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Dict) ->List[str]: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]: """simple docstring""" return def _SCREAMING_SNAKE_CASE ( self : Any) ->Tuple: """simple docstring""" _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : int) ->Any: """simple docstring""" _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int: """simple docstring""" _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase) @unittest.skip(reason="""FocalNet does not use inputs_embeds""") def _SCREAMING_SNAKE_CASE ( self : str) ->Tuple: """simple docstring""" pass 
@unittest.skip(reason="""FocalNet does not use feedforward chunking""") def _SCREAMING_SNAKE_CASE ( self : List[str]) ->int: """simple docstring""" pass def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]: """simple docstring""" _lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: _lowerCamelCase : Tuple = model_class(_UpperCamelCase) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) _lowerCamelCase : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear)) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]: """simple docstring""" _lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: _lowerCamelCase : List[str] = model_class(_UpperCamelCase) _lowerCamelCase : Dict = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : Union[str, Any] = [*signature.parameters.keys()] _lowerCamelCase : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int) ->str: """simple docstring""" _lowerCamelCase : Dict = model_class(_UpperCamelCase) model.to(_UpperCamelCase) model.eval() with torch.no_grad(): _lowerCamelCase : int = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase)) _lowerCamelCase : Union[str, Any] = outputs.hidden_states _lowerCamelCase : Optional[Any] = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths) + 1) self.assertEqual(len(_UpperCamelCase) , _UpperCamelCase) # FocalNet has a different seq_length _lowerCamelCase : List[Any] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) _lowerCamelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , ) _lowerCamelCase : str = outputs.reshaped_hidden_states self.assertEqual(len(_UpperCamelCase) , _UpperCamelCase) _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any = reshaped_hidden_states[0].shape _lowerCamelCase : Dict = ( reshaped_hidden_states[0].view(_UpperCamelCase , _UpperCamelCase , height * width).permute(0 , 2 , 1) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , ) def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Any: """simple docstring""" _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase : Dict = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: _lowerCamelCase : List[str] = True self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCamelCase : Tuple = True self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) def 
_SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]: """simple docstring""" _lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase : int = 3 _lowerCamelCase : Tuple = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) _lowerCamelCase : int = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) _lowerCamelCase : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _lowerCamelCase : int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: _lowerCamelCase : Dict = True self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , (padded_height, padded_width)) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCamelCase : Dict = True self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , (padded_height, padded_width)) @slow def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[int]: """simple docstring""" for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Optional[Any] = FocalNetModel.from_pretrained(_UpperCamelCase) self.assertIsNotNone(_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Dict) ->int: """simple docstring""" _lowerCamelCase , _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase : List[str] = _config_zero_init(_UpperCamelCase) for model_class in self.all_model_classes: _lowerCamelCase : Any = model_class(config=_UpperCamelCase) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @require_vision @require_torch class __snake_case ( unittest.TestCase ): '''simple docstring''' @cached_property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->int: """simple docstring""" return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""") if is_vision_available() else None @slow def _SCREAMING_SNAKE_CASE ( self : Dict) ->Tuple: """simple docstring""" _lowerCamelCase : Dict = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""").to(_UpperCamelCase) _lowerCamelCase : Dict = self.default_image_processor _lowerCamelCase : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""") _lowerCamelCase : List[str] = image_processor(images=_UpperCamelCase , return_tensors="""pt""").to(_UpperCamelCase) # forward pass with torch.no_grad(): _lowerCamelCase : Optional[int] = model(**_UpperCamelCase) # verify the logits _lowerCamelCase : List[Any] = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape , _UpperCamelCase) _lowerCamelCase : Optional[Any] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1]).to(_UpperCamelCase) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1E-4)) self.assertTrue(outputs.logits.argmax(dim=-1).item() , 281) @require_torch class __snake_case ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _snake_case = (FocalNetBackbone,) if is_torch_available() else () _snake_case = FocalNetConfig _snake_case = False def _SCREAMING_SNAKE_CASE ( 
self : Union[str, Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Optional[Any] = FocalNetModelTester(self)
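# --- Illustrative sketch (added; not part of the original file) ---
# Shape bookkeeping used in create_and_check_model above: patches are merged
# 2x2 at each of the len(depths) - 1 downsampling steps, so
#   seq_len = (image_size // patch_size) ** 2 // 4 ** (len(depths) - 1)
#   dim     = embed_dim * 2 ** (len(depths) - 1)
image_size, patch_size, depths, embed_dim = 32, 2, [1, 2, 1], 16
seq_len = (image_size // patch_size) ** 2 // 4 ** (len(depths) - 1)
dim = int(embed_dim * 2 ** (len(depths) - 1))
assert (seq_len, dim) == (16, 64)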
15
import math


def A__ ( __A ):
    '''simple docstring'''
    assert isinstance(__A , __A ) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    _lowerCamelCase : List[Any] = range(3 , int(math.sqrt(__A ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )


def A__ ( __A , __A=1 , **__A ):
    '''simple docstring'''
    _lowerCamelCase : Dict = factor * value
    _lowerCamelCase : str = value

    while not is_prime(__A ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1 , **__A )
    return value
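# --- Illustrative sketch (added; helper names are hypothetical) ---
# The second function walks upward (or downward with desc=True) from
# factor * value until it hits a prime; a simplified readable version:
def _is_prime_sketch(n: int) -> bool:
    if n < 2:
        return False
    return all(n % i for i in range(2, int(n**0.5) + 1))

def _next_prime_sketch(value: int, factor: int = 1) -> int:
    candidate = factor * value
    while not _is_prime_sketch(candidate):
        candidate += 1
    return candidate

# 2 * 7 = 14 -> 15 -> 16 -> 17 (prime). Unlike the original, this sketch
# returns the start value itself when it is already prime.
assert _next_prime_sketch(7, factor=2) == 17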
15
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase : List[Any] ={ "configuration_clap": [ "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST", "ClapAudioConfig", "ClapConfig", "ClapTextConfig", ], "processing_clap": ["ClapProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : int =[ "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST", "ClapModel", "ClapPreTrainedModel", "ClapTextModel", "ClapTextModelWithProjection", "ClapAudioModel", "ClapAudioModelWithProjection", ] lowerCAmelCase : Optional[Any] =["ClapFeatureExtractor"] if TYPE_CHECKING: from .configuration_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioConfig, ClapConfig, ClapTextConfig, ) from .processing_clap import ClapProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clap import ClapFeatureExtractor from .modeling_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioModel, ClapAudioModelWithProjection, ClapModel, ClapPreTrainedModel, ClapTextModel, ClapTextModelWithProjection, ) else: import sys lowerCAmelCase : Union[str, Any] =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
15
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)


@add_end_docstrings(__lowerCAmelCase )
class __snake_case ( __lowerCAmelCase ):
    '''simple docstring'''

    def __init__( self : str , *_UpperCamelCase : int , **_UpperCamelCase : List[str]) ->Tuple:
        """simple docstring"""
        super().__init__(*_UpperCamelCase , **_UpperCamelCase)
        requires_backends(self , """vision""")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == """tf"""
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)

    def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : List[str]=None) ->Optional[int]:
        """simple docstring"""
        _lowerCamelCase : Optional[int] = {}
        if top_k is not None:
            _lowerCamelCase : str = top_k
        return {}, {}, postprocess_params

    def __call__( self : Optional[int] , _UpperCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_UpperCamelCase : Optional[int]) ->Dict:
        """simple docstring"""
        return super().__call__(_UpperCamelCase , **_UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[int]) ->str:
        """simple docstring"""
        _lowerCamelCase : Tuple = load_image(_UpperCamelCase)
        _lowerCamelCase : Any = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework)
        return model_inputs

    def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : Union[str, Any]) ->List[str]:
        """simple docstring"""
        _lowerCamelCase : Any = self.model(**_UpperCamelCase)
        return model_outputs

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : List[str]=5) ->str:
        """simple docstring"""
        if top_k > self.model.config.num_labels:
            _lowerCamelCase : Union[str, Any] = self.model.config.num_labels

        if self.framework == "pt":
            _lowerCamelCase : Optional[Any] = model_outputs.logits.softmax(-1)[0]
            _lowerCamelCase , _lowerCamelCase : Dict = probs.topk(_UpperCamelCase)
        elif self.framework == "tf":
            _lowerCamelCase : List[Any] = stable_softmax(model_outputs.logits , axis=-1)[0]
            _lowerCamelCase : List[Any] = tf.math.top_k(_UpperCamelCase , k=_UpperCamelCase)
            _lowerCamelCase , _lowerCamelCase : str = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""")

        _lowerCamelCase : str = scores.tolist()
        _lowerCamelCase : str = ids.tolist()
        return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase)]
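# --- Illustrative sketch (added; the calls below would download a default model
# from the Hub and the image path is a placeholder, so they are left commented) ---
# Typical use of the pipeline above through the high-level factory:
# from transformers import pipeline
# classifier = pipeline("image-classification")
# preds = classifier("path/to/image.png", top_k=5)
# `preds` is a list of {"score": float, "label": str} dicts, as built in postprocess.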
15
1
from __future__ import annotations


def A__ ( __A ):
    '''simple docstring'''
    _lowerCamelCase : List[str] = 0.00
    _lowerCamelCase : Optional[int] = 0
    for resistor in resistors:
        if resistor <= 0:
            _lowerCamelCase : List[str] = F"""Resistor at index {index} has a negative or zero value!"""
            raise ValueError(__A )
        first_sum += 1 / float(__A )
        index += 1
    return 1 / first_sum


def A__ ( __A ):
    '''simple docstring'''
    _lowerCamelCase : Union[str, Any] = 0.00
    _lowerCamelCase : Optional[int] = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            _lowerCamelCase : Optional[Any] = F"""Resistor at index {index} has a negative value!"""
            raise ValueError(__A )
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
15
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MgpstrProcessor, ViTImageProcessor


@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
    '''simple docstring'''

    _snake_case = ViTImageProcessor if is_vision_available() else None

    @property
    def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()

    def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
        """simple docstring"""
        _lowerCamelCase : Union[str, Any] = (3, 32, 128)
        _lowerCamelCase : str = tempfile.mkdtemp()

        # fmt: off
        _lowerCamelCase : Dict = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
        # fmt: on
        _lowerCamelCase : str = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase))))

        _lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
        with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
            fp.write(json.dumps(_UpperCamelCase) + """\n""")

        _lowerCamelCase : Any = {
            """do_normalize""": False,
            """do_resize""": True,
            """image_processor_type""": """ViTImageProcessor""",
            """resample""": 3,
            """size""": {"""height""": 32, """width""": 128},
        }
        _lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , _UpperCamelCase)
        with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
            json.dump(_UpperCamelCase , _UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : List[Any] , **_UpperCamelCase : Any) ->Tuple:
        """simple docstring"""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : Dict , **_UpperCamelCase : Optional[Any]) ->List[Any]:
        """simple docstring"""
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
        """simple docstring"""
        shutil.rmtree(self.tmpdirname)

    def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
        """simple docstring"""
        _lowerCamelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)
        _lowerCamelCase : int = Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1))
        return image_input

    def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
        """simple docstring"""
        _lowerCamelCase : List[str] = self.get_tokenizer()
        _lowerCamelCase : Tuple = self.get_image_processor()

        _lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
        processor.save_pretrained(self.tmpdirname)
        _lowerCamelCase : int = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCamelCase)

        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)

        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor , _UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
        """simple docstring"""
        _lowerCamelCase : Dict = self.get_tokenizer()
        _lowerCamelCase : Optional[Any] = self.get_image_processor()

        _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
        processor.save_pretrained(self.tmpdirname)

        _lowerCamelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
        _lowerCamelCase : Union[str, Any] = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0)

        _lowerCamelCase : Tuple = MgpstrProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCamelCase , padding_value=1.0)

        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor , _UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : Any) ->int:
        """simple docstring"""
        _lowerCamelCase : int = self.get_image_processor()
        _lowerCamelCase : int = self.get_tokenizer()

        _lowerCamelCase : List[str] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)

        _lowerCamelCase : List[str] = self.prepare_image_inputs()

        _lowerCamelCase : Optional[int] = image_processor(_UpperCamelCase , return_tensors="""np""")
        _lowerCamelCase : int = processor(images=_UpperCamelCase , return_tensors="""np""")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]:
        """simple docstring"""
        _lowerCamelCase : List[Any] = self.get_image_processor()
        _lowerCamelCase : int = self.get_tokenizer()

        _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)

        _lowerCamelCase : Optional[int] = """test"""

        _lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase)
        _lowerCamelCase : Dict = tokenizer(_UpperCamelCase)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key])

    def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
        """simple docstring"""
        _lowerCamelCase : Union[str, Any] = self.get_image_processor()
        _lowerCamelCase : List[Any] = self.get_tokenizer()

        _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)

        _lowerCamelCase : Any = """test"""
        _lowerCamelCase : List[str] = self.prepare_image_inputs()

        _lowerCamelCase : int = processor(text=_UpperCamelCase , images=_UpperCamelCase)

        self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])

        # test if it raises when no input is passed
        with pytest.raises(_UpperCamelCase):
            processor()

    def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
        """simple docstring"""
        _lowerCamelCase : Union[str, Any] = self.get_image_processor()
        _lowerCamelCase : List[str] = self.get_tokenizer()

        _lowerCamelCase : Dict = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)

        _lowerCamelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]

        _lowerCamelCase : Any = processor.char_decode(_UpperCamelCase)
        _lowerCamelCase : Tuple = tokenizer.batch_decode(_UpperCamelCase)

        _lowerCamelCase : List[str] = [seq.replace(""" """ , """""") for seq in decoded_tok]

        self.assertListEqual(_UpperCamelCase , _UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
        """simple docstring"""
        _lowerCamelCase : Dict = self.get_image_processor()
        _lowerCamelCase : str = self.get_tokenizer()

        _lowerCamelCase : List[Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)

        _lowerCamelCase : int = None
        _lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()

        _lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase , images=_UpperCamelCase)

        self.assertListEqual(list(inputs.keys()) , processor.model_input_names)

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
        """simple docstring"""
        _lowerCamelCase : List[str] = self.get_image_processor()
        _lowerCamelCase : int = self.get_tokenizer()

        _lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)

        _lowerCamelCase : Any = torch.randn(1 , 27 , 38)
        _lowerCamelCase : List[Any] = torch.randn(1 , 27 , 5_0257)
        _lowerCamelCase : List[str] = torch.randn(1 , 27 , 3_0522)

        _lowerCamelCase : int = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
15
1
import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


class __snake_case ( unittest.TestCase ):
    '''simple docstring'''

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]:
        """simple docstring"""
        _lowerCamelCase : Tuple = ["""a""", """b""", """c"""]

        # Defaults to last layer if both are None
        _lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
        self.assertEqual(_UpperCamelCase , ["""c"""])
        self.assertEqual(_UpperCamelCase , [2])

        # Out indices set to match out features
        _lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(["""a""", """c"""] , _UpperCamelCase , _UpperCamelCase)
        self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
        self.assertEqual(_UpperCamelCase , [0, 2])

        # Out features set to match out indices
        _lowerCamelCase , _lowerCamelCase : Tuple = get_aligned_output_features_output_indices(_UpperCamelCase , [0, 2] , _UpperCamelCase)
        self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
        self.assertEqual(_UpperCamelCase , [0, 2])

        # Out features selected from negative indices
        _lowerCamelCase , _lowerCamelCase : str = get_aligned_output_features_output_indices(_UpperCamelCase , [-3, -1] , _UpperCamelCase)
        self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
        self.assertEqual(_UpperCamelCase , [-3, -1])

    def _SCREAMING_SNAKE_CASE ( self : int) ->int:
        """simple docstring"""
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , _UpperCamelCase)

        # Out features must be a list
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""])

        # Out features must be a subset of stage names
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""])

        # Out indices must be a list or tuple
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(_UpperCamelCase , 0 , ["""a""", """b"""])

        # Out indices must be a subset of stage names
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(_UpperCamelCase , (0, 1) , ["""a"""])

        # Out features and out indices must be the same length
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""])

        # Out features should match out indices
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""])

        # Out features and out indices should be in order
        with self.assertRaises(_UpperCamelCase):
            verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""])

        # Check passes with valid inputs
        verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""])

    def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]:
        """simple docstring"""
        _lowerCamelCase : int = BackboneMixin()

        _lowerCamelCase : Union[str, Any] = ["""a""", """b""", """c"""]
        _lowerCamelCase : Tuple = ["""a""", """c"""]
        _lowerCamelCase : List[Any] = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features , ["""a""", """c"""])
        self.assertEqual(backbone.out_indices , [0, 2])

        # Check out features and indices are updated correctly
        _lowerCamelCase : str = ["""a""", """b"""]
        self.assertEqual(backbone.out_features , ["""a""", """b"""])
        self.assertEqual(backbone.out_indices , [0, 1])

        _lowerCamelCase : Optional[int] = [-3, -1]
        self.assertEqual(backbone.out_features , ["""a""", """c"""])
        self.assertEqual(backbone.out_indices , [-3, -1])
15
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def A__ ( ):
    '''simple docstring'''
    _lowerCamelCase : Optional[int] = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
        ) )

    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""" , type=__A , default=1 , help="""Number of TPU cores to use (1 or 8).""" )

    # positional
    parser.add_argument(
        """training_script""" ,
        type=__A ,
        help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ) , )

    # rest from the training program
    parser.add_argument("""training_script_args""" , nargs=__A )

    return parser.parse_args()


def A__ ( ):
    '''simple docstring'''
    _lowerCamelCase : List[str] = parse_args()

    # Import training_script as a module.
    _lowerCamelCase : List[Any] = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    _lowerCamelCase : Optional[Any] = script_fpath.stem
    _lowerCamelCase : Dict = importlib.import_module(__A )

    # Patch sys.argv
    _lowerCamelCase : Union[str, Any] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]

    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )


if __name__ == "__main__":
    main()
15
1
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileViTConfig,
    MobileViTForImageClassification,
    MobileViTForSemanticSegmentation,
    MobileViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
lowerCAmelCase : List[str] =logging.get_logger(__name__)


def A__ ( __A ):
    '''simple docstring'''
    _lowerCamelCase : Optional[Any] = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        _lowerCamelCase : Tuple = [144, 192, 240]
        _lowerCamelCase : str = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        _lowerCamelCase : List[Any] = [96, 120, 144]
        _lowerCamelCase : Optional[int] = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        _lowerCamelCase : Union[str, Any] = [64, 80, 96]
        _lowerCamelCase : Tuple = [16, 16, 24, 48, 64, 80, 320]
        _lowerCamelCase : Any = 0.05
        _lowerCamelCase : Tuple = 2.0

    if mobilevit_name.startswith("""deeplabv3_""" ):
        _lowerCamelCase : Union[str, Any] = 512
        _lowerCamelCase : Optional[int] = 16
        _lowerCamelCase : Optional[Any] = 21
        _lowerCamelCase : Dict = """pascal-voc-id2label.json"""
    else:
        _lowerCamelCase : Optional[Any] = 1_000
        _lowerCamelCase : Tuple = """imagenet-1k-id2label.json"""

    _lowerCamelCase : Tuple = """huggingface/label-files"""
    _lowerCamelCase : Optional[Any] = json.load(open(hf_hub_download(__A , __A , repo_type="""dataset""" ) , """r""" ) )
    _lowerCamelCase : Dict = {int(__A ): v for k, v in idalabel.items()}
    _lowerCamelCase : Tuple = idalabel
    _lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}

    return config


def A__ ( __A , __A=False ):
    '''simple docstring'''
    for i in range(1 , 6 ):
        if F"""layer_{i}.""" in name:
            _lowerCamelCase : Tuple = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )

    if "conv_1." in name:
        _lowerCamelCase : Any = name.replace("""conv_1.""" , """conv_stem.""" )
    if ".block." in name:
        _lowerCamelCase : Tuple = name.replace(""".block.""" , """.""" )
    if "exp_1x1" in name:
        _lowerCamelCase : Optional[int] = name.replace("""exp_1x1""" , """expand_1x1""" )
    if "red_1x1" in name:
        _lowerCamelCase : Tuple = name.replace("""red_1x1""" , """reduce_1x1""" )
    if ".local_rep.conv_3x3." in name:
        _lowerCamelCase : List[Any] = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
    if ".local_rep.conv_1x1." in name:
        _lowerCamelCase : Union[str, Any] = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
    if ".norm." in name:
        _lowerCamelCase : Optional[int] = name.replace(""".norm.""" , """.normalization.""" )
    if ".conv." in name:
        _lowerCamelCase : List[Any] = name.replace(""".conv.""" , """.convolution.""" )
    if ".conv_proj." in name:
        _lowerCamelCase : Optional[int] = name.replace(""".conv_proj.""" , """.conv_projection.""" )

    for i in range(0 , 2 ):
        for j in range(0 , 4 ):
            if F""".{i}.{j}.""" in name:
                _lowerCamelCase : str = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )

    for i in range(2 , 6 ):
        for j in range(0 , 4 ):
            if F""".{i}.{j}.""" in name:
                _lowerCamelCase : Dict = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
                if "expand_1x1" in name:
                    _lowerCamelCase : List[Any] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
                if "conv_3x3" in name:
                    _lowerCamelCase : Union[str, Any] = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
                if "reduce_1x1" in name:
                    _lowerCamelCase : int = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )

    for i in range(2 , 5 ):
        if F""".global_rep.{i}.weight""" in name:
            _lowerCamelCase : int = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" )
        if F""".global_rep.{i}.bias""" in name:
            _lowerCamelCase : Optional[int] = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" )

    if ".global_rep." in name:
        _lowerCamelCase : Optional[int] = name.replace(""".global_rep.""" , """.transformer.""" )
    if ".pre_norm_mha.0." in name:
        _lowerCamelCase : Tuple = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
    if ".pre_norm_mha.1.out_proj." in name:
        _lowerCamelCase : Dict = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
    if ".pre_norm_ffn.0." in name:
        _lowerCamelCase : int = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
    if ".pre_norm_ffn.1." in name:
        _lowerCamelCase : Optional[Any] = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
    if ".pre_norm_ffn.4." in name:
        _lowerCamelCase : Union[str, Any] = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
    if ".transformer." in name:
        _lowerCamelCase : str = name.replace(""".transformer.""" , """.transformer.layer.""" )
    if ".aspp_layer." in name:
        _lowerCamelCase : Optional[int] = name.replace(""".aspp_layer.""" , """.""" )
    if ".aspp_pool." in name:
        _lowerCamelCase : Optional[int] = name.replace(""".aspp_pool.""" , """.""" )
    if "seg_head." in name:
        _lowerCamelCase : int = name.replace("""seg_head.""" , """segmentation_head.""" )
    if "segmentation_head.classifier.classifier." in name:
        _lowerCamelCase : List[str] = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )

    if "classifier.fc." in name:
        _lowerCamelCase : Any = name.replace("""classifier.fc.""" , """classifier.""" )
    elif (not base_model) and ("segmentation_head." not in name):
        _lowerCamelCase : Optional[Any] = """mobilevit.""" + name

    return name


def A__ ( __A , __A , __A=False ):
    '''simple docstring'''
    if base_model:
        _lowerCamelCase : Union[str, Any] = """"""
    else:
        _lowerCamelCase : Dict = """mobilevit."""

    for key in orig_state_dict.copy().keys():
        _lowerCamelCase : Optional[Any] = orig_state_dict.pop(__A )

        if key[:8] == "encoder.":
            _lowerCamelCase : str = key[8:]

        if "qkv" in key:
            _lowerCamelCase : List[str] = key.split(""".""" )
            _lowerCamelCase : List[Any] = int(key_split[0][6:] ) - 1
            _lowerCamelCase : str = int(key_split[3] )
            _lowerCamelCase : Any = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
            _lowerCamelCase : Tuple = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            _lowerCamelCase : int = (
                F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
            )
            if "weight" in key:
                _lowerCamelCase : Union[str, Any] = val[:dim, :]
                _lowerCamelCase : Tuple = val[dim : dim * 2, :]
                _lowerCamelCase : Any = val[-dim:, :]
            else:
                _lowerCamelCase : Any = val[:dim]
                _lowerCamelCase : str = val[dim : dim * 2]
                _lowerCamelCase : str = val[-dim:]
        else:
            _lowerCamelCase : List[str] = val

    return orig_state_dict


def A__ ( ):
    '''simple docstring'''
    _lowerCamelCase : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    _lowerCamelCase : Any = Image.open(requests.get(__A , stream=__A ).raw )

    return im


@torch.no_grad()
def A__ ( __A , __A , __A , __A=False ):
    '''simple docstring'''
    _lowerCamelCase : List[str] = get_mobilevit_config(__A )

    # load original state_dict
    _lowerCamelCase : int = torch.load(__A , map_location="""cpu""" )

    # load 🤗 model
    if mobilevit_name.startswith("""deeplabv3_""" ):
        _lowerCamelCase : List[str] = MobileViTForSemanticSegmentation(__A ).eval()
    else:
        _lowerCamelCase : List[str] = MobileViTForImageClassification(__A ).eval()

    _lowerCamelCase : List[str] = convert_state_dict(__A , __A )
    model.load_state_dict(__A )

    # Check outputs on an image, prepared by MobileViTImageProcessor
    _lowerCamelCase : int = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    _lowerCamelCase : Any = image_processor(images=prepare_img() , return_tensors="""pt""" )
    _lowerCamelCase : Any = model(**__A )
    _lowerCamelCase : Any = outputs.logits

    if mobilevit_name.startswith("""deeplabv3_""" ):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            _lowerCamelCase : List[str] = torch.tensor(
                [
                    [[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]],
                    [[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]],
                    [[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            _lowerCamelCase : Union[str, Any] = torch.tensor(
                [
                    [[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]],
                    [[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]],
                    [[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            _lowerCamelCase : Optional[Any] = torch.tensor(
                [
                    [[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]],
                    [[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]],
                    [[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]],
                ] )
        else:
            raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )

        assert torch.allclose(logits[0, :3, :3, :3] , __A , atol=1E-4 )
    else:
        assert logits.shape == (1, 1_000)

        if mobilevit_name == "mobilevit_s":
            _lowerCamelCase : Optional[int] = torch.tensor([-0.9_866, 0.2_392, -1.1_241] )
        elif mobilevit_name == "mobilevit_xs":
            _lowerCamelCase : Dict = torch.tensor([-2.4_761, -0.9_399, -1.9_587] )
        elif mobilevit_name == "mobilevit_xxs":
            _lowerCamelCase : List[str] = torch.tensor([-1.9_364, -1.2_327, -0.4_653] )
        else:
            raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )

        assert torch.allclose(logits[0, :3] , __A , atol=1E-4 )

    Path(__A ).mkdir(exist_ok=__A )
    print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(__A )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(__A )

    if push_to_hub:
        _lowerCamelCase : Optional[Any] = {
            """mobilevit_s""": """mobilevit-small""",
            """mobilevit_xs""": """mobilevit-x-small""",
            """mobilevit_xxs""": """mobilevit-xx-small""",
            """deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
            """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
            """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
        }

        print("""Pushing to the hub...""" )
        _lowerCamelCase : Dict = model_mapping[mobilevit_name]
        image_processor.push_to_hub(__A , organization="""apple""" )
        model.push_to_hub(__A , organization="""apple""" )


if __name__ == "__main__":
    lowerCAmelCase : List[str] =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--mobilevit_name",
        default="mobilevit_s",
        type=str,
        help=(
            "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
            " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    lowerCAmelCase : Tuple =parser.parse_args()
    convert_movilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
15
def A__ ( __A , __A ): '''simple docstring''' _enforce_args(__A , __A ) if n == 0: return 0 _lowerCamelCase : Tuple = float("""-inf""" ) for i in range(1 , n + 1 ): _lowerCamelCase : Any = max( __A , prices[i - 1] + naive_cut_rod_recursive(n - i , __A ) ) return max_revue def A__ ( __A , __A ): '''simple docstring''' _enforce_args(__A , __A ) _lowerCamelCase : Optional[Any] = [float("""-inf""" ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(__A , __A , __A ) def A__ ( __A , __A , __A ): '''simple docstring''' if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: _lowerCamelCase : int = float("""-inf""" ) for i in range(1 , n + 1 ): _lowerCamelCase : Optional[Any] = max( __A , prices[i - 1] + _top_down_cut_rod_recursive(n - i , __A , __A ) , ) _lowerCamelCase : Optional[Any] = max_revenue return max_rev[n] def A__ ( __A , __A ): '''simple docstring''' _enforce_args(__A , __A ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. _lowerCamelCase : List[Any] = [float("""-inf""" ) for _ in range(n + 1 )] _lowerCamelCase : Any = 0 for i in range(1 , n + 1 ): _lowerCamelCase : Any = max_rev[i] for j in range(1 , i + 1 ): _lowerCamelCase : List[Any] = max(__A , prices[j - 1] + max_rev[i - j] ) _lowerCamelCase : int = max_revenue_i return max_rev[n] def A__ ( __A , __A ): '''simple docstring''' if n < 0: _lowerCamelCase : Any = F"""n must be greater than or equal to 0. Got n = {n}""" raise ValueError(__A ) if n > len(__A ): _lowerCamelCase : List[Any] = ( """Each integral piece of rod must have a corresponding price. """ F"""Got n = {n} but length of prices = {len(__A )}""" ) raise ValueError(__A ) def A__ ( ): '''simple docstring''' _lowerCamelCase : str = [6, 10, 12, 15, 20, 23] _lowerCamelCase : List[str] = len(__A ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. _lowerCamelCase : Tuple = 36 _lowerCamelCase : Any = top_down_cut_rod(__A , __A ) _lowerCamelCase : Dict = bottom_up_cut_rod(__A , __A ) _lowerCamelCase : List[str] = naive_cut_rod_recursive(__A , __A ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
15
1
import argparse
import glob
import logging
import os
import time
from argparse import Namespace

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset

from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors


lowerCAmelCase : int =logging.getLogger(__name__)


class __snake_case ( __lowerCAmelCase ):
    '''simple docstring'''

    _snake_case = 'sequence-classification'

    def __init__( self : Optional[int] , _UpperCamelCase : str) ->Tuple:
        """simple docstring"""
        if type(_UpperCamelCase) == dict:
            _lowerCamelCase : Any = Namespace(**_UpperCamelCase)
        _lowerCamelCase : List[Any] = glue_output_modes[hparams.task]
        _lowerCamelCase : Optional[int] = glue_tasks_num_labels[hparams.task]

        super().__init__(_UpperCamelCase , _UpperCamelCase , self.mode)

    def _SCREAMING_SNAKE_CASE ( self : int , **_UpperCamelCase : Dict) ->List[str]:
        """simple docstring"""
        return self.model(**_UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any]) ->Optional[int]:
        """simple docstring"""
        _lowerCamelCase : int = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            _lowerCamelCase : List[Any] = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None

        _lowerCamelCase : Optional[Any] = self(**_UpperCamelCase)
        _lowerCamelCase : int = outputs[0]

        _lowerCamelCase : List[str] = self.trainer.lr_schedulers[0]["""scheduler"""]
        _lowerCamelCase : Union[str, Any] = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def _SCREAMING_SNAKE_CASE ( self : str) ->str:
        """simple docstring"""
        _lowerCamelCase : str = self.hparams
        _lowerCamelCase : Optional[int] = processors[args.task]()
        _lowerCamelCase : Optional[Any] = processor.get_labels()

        for mode in ["train", "dev"]:
            _lowerCamelCase : Union[str, Any] = self._feature_file(_UpperCamelCase)
            if os.path.exists(_UpperCamelCase) and not args.overwrite_cache:
                logger.info("""Loading features from cached file %s""" , _UpperCamelCase)
            else:
                logger.info("""Creating features from dataset file at %s""" , args.data_dir)
                _lowerCamelCase : Tuple = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == """dev"""
                    else processor.get_train_examples(args.data_dir)
                )
                _lowerCamelCase : Optional[int] = convert_examples_to_features(
                    _UpperCamelCase ,
                    self.tokenizer ,
                    max_length=args.max_seq_length ,
                    label_list=self.labels ,
                    output_mode=args.glue_output_mode , )
                logger.info("""Saving features into cached file %s""" , _UpperCamelCase)
                torch.save(_UpperCamelCase , _UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : bool = False) ->DataLoader:
        """simple docstring"""
        _lowerCamelCase : int = """dev""" if mode == """test""" else mode

        _lowerCamelCase : str = self._feature_file(_UpperCamelCase)
        logger.info("""Loading features from cached file %s""" , _UpperCamelCase)
        _lowerCamelCase : Union[str, Any] = torch.load(_UpperCamelCase)
        _lowerCamelCase : Optional[int] = torch.tensor([f.input_ids for f in features] , dtype=torch.long)
        _lowerCamelCase : Optional[Any] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long)
        _lowerCamelCase : int = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            _lowerCamelCase : Any = torch.tensor([f.label for f in features] , dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            _lowerCamelCase : Dict = torch.tensor([f.label for f in features] , dtype=torch.float)

        return DataLoader(
            TensorDataset(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) ,
            batch_size=_UpperCamelCase ,
            shuffle=_UpperCamelCase , )

    def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str]) ->Dict:
        """simple docstring"""
        _lowerCamelCase : Union[str, Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            _lowerCamelCase : Union[str, Any] = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None

        _lowerCamelCase : Any = self(**_UpperCamelCase)
        _lowerCamelCase , _lowerCamelCase : Optional[int] = outputs[:2]
        _lowerCamelCase : str = logits.detach().cpu().numpy()
        _lowerCamelCase : int = inputs["""labels"""].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : Tuple) ->tuple:
        """simple docstring"""
        _lowerCamelCase : Dict = torch.stack([x["""val_loss"""] for x in outputs]).mean().detach().cpu().item()
        _lowerCamelCase : int = np.concatenate([x["""pred"""] for x in outputs] , axis=0)

        if self.hparams.glue_output_mode == "classification":
            _lowerCamelCase : Any = np.argmax(_UpperCamelCase , axis=1)
        elif self.hparams.glue_output_mode == "regression":
            _lowerCamelCase : Tuple = np.squeeze(_UpperCamelCase)

        _lowerCamelCase : Optional[Any] = np.concatenate([x["""target"""] for x in outputs] , axis=0)
        _lowerCamelCase : Any = [[] for _ in range(out_label_ids.shape[0])]
        _lowerCamelCase : Any = [[] for _ in range(out_label_ids.shape[0])]

        _lowerCamelCase : int = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , _UpperCamelCase , _UpperCamelCase)}

        _lowerCamelCase : int = dict(results.items())
        _lowerCamelCase : Optional[Any] = results
        return ret, preds_list, out_label_list

    def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : list) ->dict:
        """simple docstring"""
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self._eval_end(_UpperCamelCase)
        _lowerCamelCase : Optional[int] = ret["""log"""]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : Optional[int]) ->dict:
        """simple docstring"""
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = self._eval_end(_UpperCamelCase)
        _lowerCamelCase : str = ret["""log"""]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def _SCREAMING_SNAKE_CASE ( _UpperCamelCase : List[str] , _UpperCamelCase : Tuple) ->Any:
        """simple docstring"""
        BaseTransformer.add_model_specific_args(_UpperCamelCase , _UpperCamelCase)
        parser.add_argument(
            """--max_seq_length""" ,
            default=128 ,
            type=_UpperCamelCase ,
            help=(
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            ) , )

        parser.add_argument(
            """--task""" ,
            default="""""" ,
            type=_UpperCamelCase ,
            required=_UpperCamelCase ,
            help="""The GLUE task to run""" , )
        parser.add_argument(
            """--gpus""" ,
            default=0 ,
            type=_UpperCamelCase ,
            help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )

        parser.add_argument(
            """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""")

        return parser


def A__ ( ):
    '''simple docstring'''
    _lowerCamelCase : List[Any] = argparse.ArgumentParser()
    add_generic_args(__A , os.getcwd() )
    _lowerCamelCase : str = GLUETransformer.add_model_specific_args(__A , os.getcwd() )
    _lowerCamelCase : Any = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        _lowerCamelCase : Union[str, Any] = os.path.join(
            """./results""" ,
            F"""{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}""" , )
        os.makedirs(args.output_dir )

    _lowerCamelCase : List[str] = GLUETransformer(__A )
    _lowerCamelCase : str = generic_train(__A , __A )

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        _lowerCamelCase : Optional[Any] = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=__A ) )
        _lowerCamelCase : Dict = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(__A )


if __name__ == "__main__":
    main()
15
from __future__ import annotations


class __snake_case :
    '''simple docstring'''

    def __init__( self : Tuple , _UpperCamelCase : int = 0) ->str:
        """simple docstring"""
        _lowerCamelCase : Union[str, Any] = key

    def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]:
        """simple docstring"""
        assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)

        _lowerCamelCase : Union[str, Any] = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(_UpperCamelCase) ^ key) for ch in content]

    def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]:
        """simple docstring"""
        assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)

        _lowerCamelCase : Optional[int] = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(_UpperCamelCase) ^ key) for ch in content]

    def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str:
        """simple docstring"""
        assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)

        _lowerCamelCase : int = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        _lowerCamelCase : Any = """"""

        for ch in content:
            ans += chr(ord(_UpperCamelCase) ^ key)

        return ans

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str:
        """simple docstring"""
        assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)

        _lowerCamelCase : int = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        _lowerCamelCase : Optional[Any] = """"""

        for ch in content:
            ans += chr(ord(_UpperCamelCase) ^ key)

        return ans

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->bool:
        """simple docstring"""
        assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
        try:
            with open(_UpperCamelCase) as fin, open("""encrypt.out""" , """w+""") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(_UpperCamelCase , _UpperCamelCase))

        except OSError:
            return False

        return True

    def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : int) ->bool:
        """simple docstring"""
        assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
        try:
            with open(_UpperCamelCase) as fin, open("""decrypt.out""" , """w+""") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(_UpperCamelCase , _UpperCamelCase))

        except OSError:
            return False

        return True


# Tests
# crypt = XORCipher()
# key = 67

# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))

# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))

# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))

# if (crypt.encrypt_file("test.txt",key)):
#     print("encrypt successful")
# else:
#     print("encrypt unsuccessful")

# if (crypt.decrypt_file("encrypt.out",key)):
#     print("decrypt successful")
# else:
#     print("decrypt unsuccessful")
15
1
def A__ ( __A , __A ): '''simple docstring''' _enforce_args(__A , __A ) if n == 0: return 0 _lowerCamelCase : Tuple = float("""-inf""" ) for i in range(1 , n + 1 ): _lowerCamelCase : Any = max( __A , prices[i - 1] + naive_cut_rod_recursive(n - i , __A ) ) return max_revue def A__ ( __A , __A ): '''simple docstring''' _enforce_args(__A , __A ) _lowerCamelCase : Optional[Any] = [float("""-inf""" ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(__A , __A , __A ) def A__ ( __A , __A , __A ): '''simple docstring''' if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: _lowerCamelCase : int = float("""-inf""" ) for i in range(1 , n + 1 ): _lowerCamelCase : Optional[Any] = max( __A , prices[i - 1] + _top_down_cut_rod_recursive(n - i , __A , __A ) , ) _lowerCamelCase : Optional[Any] = max_revenue return max_rev[n] def A__ ( __A , __A ): '''simple docstring''' _enforce_args(__A , __A ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. _lowerCamelCase : List[Any] = [float("""-inf""" ) for _ in range(n + 1 )] _lowerCamelCase : Any = 0 for i in range(1 , n + 1 ): _lowerCamelCase : Any = max_rev[i] for j in range(1 , i + 1 ): _lowerCamelCase : List[Any] = max(__A , prices[j - 1] + max_rev[i - j] ) _lowerCamelCase : int = max_revenue_i return max_rev[n] def A__ ( __A , __A ): '''simple docstring''' if n < 0: _lowerCamelCase : Any = F"""n must be greater than or equal to 0. Got n = {n}""" raise ValueError(__A ) if n > len(__A ): _lowerCamelCase : List[Any] = ( """Each integral piece of rod must have a corresponding price. """ F"""Got n = {n} but length of prices = {len(__A )}""" ) raise ValueError(__A ) def A__ ( ): '''simple docstring''' _lowerCamelCase : str = [6, 10, 12, 15, 20, 23] _lowerCamelCase : List[str] = len(__A ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. _lowerCamelCase : Tuple = 36 _lowerCamelCase : Any = top_down_cut_rod(__A , __A ) _lowerCamelCase : Dict = bottom_up_cut_rod(__A , __A ) _lowerCamelCase : List[str] = naive_cut_rod_recursive(__A , __A ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
15
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class __snake_case ( __lowerCAmelCase ):
    '''simple docstring'''

    def __init__( self : Dict , _UpperCamelCase : NestedDataStructureLike[PathLike] , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Optional[Features] = None , _UpperCamelCase : str = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : Optional[int] = None , **_UpperCamelCase : Tuple , ) ->Union[str, Any]:
        """simple docstring"""
        super().__init__(
            _UpperCamelCase ,
            split=_UpperCamelCase ,
            features=_UpperCamelCase ,
            cache_dir=_UpperCamelCase ,
            keep_in_memory=_UpperCamelCase ,
            streaming=_UpperCamelCase ,
            num_proc=_UpperCamelCase ,
            **_UpperCamelCase , )
        _lowerCamelCase : List[Any] = path_or_paths if isinstance(_UpperCamelCase , _UpperCamelCase) else {self.split: path_or_paths}
        _lowerCamelCase : Any = Text(
            cache_dir=_UpperCamelCase ,
            data_files=_UpperCamelCase ,
            features=_UpperCamelCase ,
            **_UpperCamelCase , )

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]:
        """simple docstring"""
        if self.streaming:
            _lowerCamelCase : Tuple = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            _lowerCamelCase : List[Any] = None
            _lowerCamelCase : Any = None
            _lowerCamelCase : List[str] = None
            _lowerCamelCase : Dict = None

            self.builder.download_and_prepare(
                download_config=_UpperCamelCase ,
                download_mode=_UpperCamelCase ,
                verification_mode=_UpperCamelCase ,
                base_path=_UpperCamelCase ,
                num_proc=self.num_proc , )
            _lowerCamelCase : Optional[int] = self.builder.as_dataset(
                split=self.split , verification_mode=_UpperCamelCase , in_memory=self.keep_in_memory)
        return dataset
15
1
import argparse
from collections import OrderedDict
from pathlib import Path

import torch
from transformers import (
    VisualBertConfig,
    VisualBertForMultipleChoice,
    VisualBertForPreTraining,
    VisualBertForQuestionAnswering,
    VisualBertForVisualReasoning,
)
from transformers.utils import logging


logging.set_verbosity_info()
lowerCAmelCase : List[Any] =logging.get_logger(__name__)

lowerCAmelCase : Optional[Any] =[
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

lowerCAmelCase : List[Any] =[
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]


def A__ ( __A ):
    '''simple docstring'''
    _lowerCamelCase : Union[str, Any] = torch.load(__A , map_location="""cpu""" )
    return sd


def A__ ( __A , __A , __A=rename_keys_prefix ):
    '''simple docstring'''
    _lowerCamelCase : Optional[int] = OrderedDict()
    _lowerCamelCase : str = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        _lowerCamelCase : int = key
        for name_pair in rename_keys_prefix:
            _lowerCamelCase : str = new_key.replace(name_pair[0] , name_pair[1] )
        _lowerCamelCase : int = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            _lowerCamelCase : Optional[Any] = new_d["""cls.predictions.bias"""]
    return new_d


@torch.no_grad()
def A__ ( __A , __A ):
    '''simple docstring'''
    assert (
        checkpoint_path.split("""/""" )[-1] in ACCEPTABLE_CHECKPOINTS
    ), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""

    # Get Config
    if "pre" in checkpoint_path:
        _lowerCamelCase : List[str] = """pretraining"""
        if "vcr" in checkpoint_path:
            _lowerCamelCase : str = {"""visual_embedding_dim""": 512}
        elif "vqa_advanced" in checkpoint_path:
            _lowerCamelCase : Dict = {"""visual_embedding_dim""": 2_048}
        elif "vqa" in checkpoint_path:
            _lowerCamelCase : Optional[int] = {"""visual_embedding_dim""": 2_048}
        elif "nlvr" in checkpoint_path:
            _lowerCamelCase : Tuple = {"""visual_embedding_dim""": 1_024}
        else:
            raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""" )
    else:
        if "vcr" in checkpoint_path:
            _lowerCamelCase : int = {"""visual_embedding_dim""": 512}
            _lowerCamelCase : Optional[int] = """multichoice"""
        elif "vqa_advanced" in checkpoint_path:
            _lowerCamelCase : List[Any] = {"""visual_embedding_dim""": 2_048}
            _lowerCamelCase : Optional[Any] = """vqa_advanced"""
        elif "vqa" in checkpoint_path:
            _lowerCamelCase : Optional[Any] = {"""visual_embedding_dim""": 2_048, """num_labels""": 3_129}
            _lowerCamelCase : Optional[int] = """vqa"""
        elif "nlvr" in checkpoint_path:
            _lowerCamelCase : List[str] = {
                """visual_embedding_dim""": 1_024,
                """num_labels""": 2,
            }
            _lowerCamelCase : Optional[int] = """nlvr"""

    _lowerCamelCase : Any = VisualBertConfig(**__A )

    # Load State Dict
    _lowerCamelCase : Optional[int] = load_state_dict(__A )

    _lowerCamelCase : int = get_new_dict(__A , __A )

    if model_type == "pretraining":
        _lowerCamelCase : Optional[int] = VisualBertForPreTraining(__A )
    elif model_type == "vqa":
        _lowerCamelCase : Dict = VisualBertForQuestionAnswering(__A )
    elif model_type == "nlvr":
        _lowerCamelCase : Tuple = VisualBertForVisualReasoning(__A )
    elif model_type == "multichoice":
        _lowerCamelCase : Optional[Any] = VisualBertForMultipleChoice(__A )

    model.load_state_dict(__A )
    # Save Checkpoints
    Path(__A ).mkdir(exist_ok=__A )
    model.save_pretrained(__A )


if __name__ == "__main__":
    lowerCAmelCase : Union[str, Any] =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    lowerCAmelCase : Tuple =parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
15
lowerCAmelCase : Tuple =0  # The first color of the flag.
lowerCAmelCase : Union[str, Any] =1  # The second color of the flag.
lowerCAmelCase : Any =2  # The third color of the flag.
lowerCAmelCase : List[str] =(red, white, blue)


def A__ ( __A ):
    '''simple docstring'''
    if not sequence:
        return []
    if len(__A ) == 1:
        return list(__A )
    _lowerCamelCase : int = 0
    _lowerCamelCase : Dict = len(__A ) - 1
    _lowerCamelCase : str = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            _lowerCamelCase , _lowerCamelCase : Tuple = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            _lowerCamelCase , _lowerCamelCase : str = sequence[high], sequence[mid]
            high -= 1
        else:
            _lowerCamelCase : int = F"""The elements inside the sequence must contains only {colors} values"""
            raise ValueError(__A )
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lowerCAmelCase : List[str] =input("Enter numbers separated by commas:\n").strip()
    lowerCAmelCase : Dict =[int(item.strip()) for item in user_input.split(",")]
    print(F"""{dutch_national_flag_sort(unsorted)}""")
15
1
import math_equivalence  # From: git+https://github.com/hendrycks/math.git

import datasets


lowerCAmelCase : int ="\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"

lowerCAmelCase : Union[str, Any] ="\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"

lowerCAmelCase : Optional[int] =r"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {'accuracy': 1.0}\n"


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
    '''simple docstring'''

    def _SCREAMING_SNAKE_CASE ( self : Dict) ->Any:
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string"""),
                    """references""": datasets.Value("""string"""),
                }) ,
            homepage="""https://github.com/hendrycks/math""" ,
            codebase_urls=["""https://github.com/hendrycks/math"""] , )

    def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : Any , _UpperCamelCase : str) ->Any:
        """simple docstring"""
        _lowerCamelCase : Any = 0.0
        for i, j in zip(_UpperCamelCase , _UpperCamelCase):
            n_correct += 1.0 if math_equivalence.is_equiv(_UpperCamelCase , _UpperCamelCase) else 0.0
        _lowerCamelCase : str = n_correct / len(_UpperCamelCase)

        return {
            "accuracy": accuracy,
        }
15
from __future__ import annotations


lowerCAmelCase : int =[]


def A__ ( __A , __A , __A ):
    '''simple docstring'''
    for i in range(len(__A ) ):
        if board[row][i] == 1:
            return False
    for i in range(len(__A ) ):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(__A , -1 , -1 ) , range(__A , -1 , -1 ) ):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(__A , -1 , -1 ) , range(__A , len(__A ) ) ):
        if board[i][j] == 1:
            return False
    return True


def A__ ( __A , __A ):
    '''simple docstring'''
    if row >= len(__A ):
        solution.append(__A )
        printboard(__A )
        print()
        return True
    for i in range(len(__A ) ):
        if is_safe(__A , __A , __A ):
            _lowerCamelCase : int = 1
            solve(__A , row + 1 )
            _lowerCamelCase : List[str] = 0
    return False


def A__ ( __A ):
    '''simple docstring'''
    for i in range(len(__A ) ):
        for j in range(len(__A ) ):
            if board[i][j] == 1:
                print("""Q""" , end=""" """ )
            else:
                print(""".""" , end=""" """ )
        print()


# n=int(input("The no. of queens"))
lowerCAmelCase : int =8
lowerCAmelCase : Union[str, Any] =[[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
15
1
import argparse
import os

import torch

from transformers import (
    XLNetConfig,
    XLNetForQuestionAnswering,
    XLNetForSequenceClassification,
    XLNetLMHeadModel,
    load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


lowerCAmelCase : int ={
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}

logging.set_verbosity_info()


def A__ ( __A , __A , __A , __A=None ):
    '''simple docstring'''
    # Initialise PyTorch model
    _lowerCamelCase : Tuple = XLNetConfig.from_json_file(__A )

    _lowerCamelCase : List[Any] = finetuning_task.lower() if finetuning_task is not None else """"""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
        _lowerCamelCase : int = finetuning_task
        _lowerCamelCase : Union[str, Any] = GLUE_TASKS_NUM_LABELS[finetuning_task]
        _lowerCamelCase : int = XLNetForSequenceClassification(__A )
    elif "squad" in finetuning_task:
        _lowerCamelCase : Dict = finetuning_task
        _lowerCamelCase : Optional[Any] = XLNetForQuestionAnswering(__A )
    else:
        _lowerCamelCase : Any = XLNetLMHeadModel(__A )

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(__A , __A , __A )

    # Save pytorch-model
    _lowerCamelCase : Optional[Any] = os.path.join(__A , __A )
    _lowerCamelCase : Any = os.path.join(__A , __A )
    print(F"""Save PyTorch model to {os.path.abspath(__A )}""" )
    torch.save(model.state_dict() , __A )
    print(F"""Save configuration file to {os.path.abspath(__A )}""" )
    with open(__A , """w""" , encoding="""utf-8""" ) as f:
        f.write(config.to_json_string() )


if __name__ == "__main__":
    lowerCAmelCase : Dict =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    lowerCAmelCase : Union[str, Any] =parser.parse_args()
    print(args)

    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
15
import argparse
import os

import torch

from transformers import (
    XLNetConfig,
    XLNetForQuestionAnswering,
    XLNetForSequenceClassification,
    XLNetLMHeadModel,
    load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


lowerCAmelCase : int ={
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}

logging.set_verbosity_info()


def A__ ( __A , __A , __A , __A=None ):
    '''simple docstring'''
    # Initialise PyTorch model
    _lowerCamelCase : Tuple = XLNetConfig.from_json_file(__A )

    _lowerCamelCase : List[Any] = finetuning_task.lower() if finetuning_task is not None else """"""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
        _lowerCamelCase : int = finetuning_task
        _lowerCamelCase : Union[str, Any] = GLUE_TASKS_NUM_LABELS[finetuning_task]
        _lowerCamelCase : int = XLNetForSequenceClassification(__A )
    elif "squad" in finetuning_task:
        _lowerCamelCase : Dict = finetuning_task
        _lowerCamelCase : Optional[Any] = XLNetForQuestionAnswering(__A )
    else:
        _lowerCamelCase : Any = XLNetLMHeadModel(__A )

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(__A , __A , __A )

    # Save pytorch-model
    _lowerCamelCase : Optional[Any] = os.path.join(__A , __A )
    _lowerCamelCase : Any = os.path.join(__A , __A )
    print(F"""Save PyTorch model to {os.path.abspath(__A )}""" )
    torch.save(model.state_dict() , __A )
    print(F"""Save configuration file to {os.path.abspath(__A )}""" )
    with open(__A , """w""" , encoding="""utf-8""" ) as f:
        f.write(config.to_json_string() )


if __name__ == "__main__":
    lowerCAmelCase : Dict =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    lowerCAmelCase : Union[str, Any] =parser.parse_args()
    print(args)

    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
15
1
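A usage sketch for the restored conversion entry point above. The function signature comes from the script's own __main__ block; the checkpoint and output paths are hypothetical placeholders.

# Hypothetical paths -- substitute a real TF XLNet checkpoint and config.
convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path="./xlnet_model.ckpt",
    xlnet_config_file="./xlnet_config.json",
    pytorch_dump_folder_path="./xlnet-pytorch",
    finetuning_task="sts-b",  # any key of GLUE_TASKS_NUM_LABELS, "squad", or None
)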
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
15
# NOTE: the original function name was destroyed by the identifier scrambling;
# a descriptive name is used here. Local names are recovered from the usage sites.
def all_characters_unique(input_str: str) -> bool:
    """Return True if no character occurs twice in input_str, using a bitmap of code points."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on the bit for the current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
15
1
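A minimal usage sketch for the bitmap uniqueness check above; the function name is the descriptive one chosen during de-scrambling, and the outputs follow directly from the logic.

assert all_characters_unique("abcde") is True   # all characters distinct
assert all_characters_unique("abcda") is False  # 'a' repeats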
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
    "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
    "xlm-roberta-large-finetuned-conll02-dutch": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll02-spanish": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-english": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-german": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
    ),
}


class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
15
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
15
1
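A short sketch of how the restored config class above is typically exercised; the printed defaults follow directly from the __init__ signature shown in the snippet.

# assuming: from transformers import XLMRobertaConfig
config = XLMRobertaConfig()        # all defaults
print(config.model_type)           # "xlm-roberta"
print(config.hidden_size)          # 768
print(config.num_hidden_layers)    # 12

small = XLMRobertaConfig(hidden_size=384, num_attention_heads=6)  # override a few fields
print(small.hidden_size)           # 384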
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        # 0-1 BFS: 0-weight edges go to the front of the deque, 1-weight
        # edges to the back, so vertices are popped in distance order.
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if isinstance(dest_vertex_distance, int) and new_distance >= dest_vertex_distance:
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
15
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


# NOTE: the original class name was destroyed by the identifier scrambling; a
# descriptive name is used here. Attribute names are recovered from the usage
# sites in `preprocess`.
class SimpleImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
15
1
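A small worked example for the 0-1 BFS class above; the expected distance can be checked by hand from the edge weights.

# assuming the AdjacencyList / Edge definitions above are importable
graph = AdjacencyList(3)
graph.add_edge(0, 1, 0)  # free edge 0 -> 1
graph.add_edge(1, 2, 1)  # unit edge 1 -> 2
graph.add_edge(0, 2, 1)  # unit edge 0 -> 2

# Both routes to vertex 2 cost 1 (0->2 directly, or 0->1->2 via the free edge),
# so the shortest distance is 1.
print(graph.get_shortest_path(0, 2))  # 1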
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(number: int) -> int:
    """Mobius function: 1 for square-free n with an even number of prime
    factors, -1 for an odd number, and 0 if n has a squared prime factor."""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
15
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for a single input sample."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phase = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phase, -2 * pi))
    plt.show()
15
1
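Hand-checked values for the Mobius function above; the imports mirror the snippet's own package layout, and the name `mobius` is the descriptive one chosen during de-scrambling.

print(mobius(6))   # 1   (6 = 2 * 3, two prime factors, square-free)
print(mobius(30))  # -1  (30 = 2 * 3 * 5, three prime factors)
print(mobius(12))  # 0   (12 = 2^2 * 3, not square-free)
print(mobius(1))   # 1   (empty factorisation, even count)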
from __future__ import annotations


# NOTE: the original function name was destroyed by the identifier scrambling;
# a descriptive name is used here.
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
15
import argparse
from pathlib import Path

import torch
from packaging import version
from torch.onnx import export

from diffusers import AutoencoderKL


is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
15
1
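A worked instance for the fractional knapsack function above, using the classic textbook numbers; the result can be verified by hand from the value/weight ratios.

# Items worth 60/100/120 with weights 10/20/30 and capacity 50: the greedy
# ratio order takes items 0 and 1 whole and 2/3 of item 2, totalling 240.
value = [60, 100, 120]
weight = [10, 20, 30]
max_value, fractions = fractional_knapsack(value, weight, capacity=50)
print(max_value)  # 240.0
print(fractions)  # [1, 1, 0.6666...]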
import copy
import tempfile
import unittest

from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError

from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test


class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_default_values(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
15
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # TEMPERATURE (unit = K)


# NOTE: the original function name was destroyed by the identifier scrambling;
# a descriptive name is used here.
def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
    """Calculate the built-in voltage of a pn junction, in volts."""
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError("Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError("Acceptor concentration should be greater than intrinsic concentration")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
15
1
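An illustrative call to the built-in voltage function above, with silicon-like sample numbers (concentrations in cm^-3 are assumed; only the ratio matters inside the log).

# Nd = Na = 1e17, ni = 1.5e10 gives kT/q * ln(Nd*Na/ni^2) ~ 0.026 V * 31.4
v_bi = builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10)
print(f"{v_bi:.2f} V")  # roughly 0.81 V at T = 300 K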