Dataset schema:

  code: string, lengths 81 to 54k
  code_codestyle: int64, range 0 to 721
  style_context: string, lengths 91 to 41.9k
  style_context_codestyle: int64, range 0 to 699
  label: int64, range 0 to 1
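Each row pairs a code string with a style_context string, plus their integer style ids and a binary label. A minimal sketch of reading rows with this schema via the datasets library (the repo id below is hypothetical; substitute the actual dataset name):

from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # hypothetical repo id
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])

The first row follows.

code: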
def count_divisors(n: int) -> int:
    """Count the divisors of n via its prime factorization."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        # a leftover prime factor greater than sqrt(n) contributes a factor of 2
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Return the first triangular number with more than 500 divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
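A quick sanity check for the sample above (editor's sketch, not part of the dataset row): 28 = 2^2 * 7 has (2 + 1) * (1 + 1) = 6 divisors, and the search stops at the known Project Euler 12 answer.

assert count_divisors(28) == 6
assert solution() == 76576500  # first triangular number with more than 500 divisors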
code_codestyle: 713

style_context:
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] ,A_ : Optional[Any] ,A_ : Optional[int]=2 ,A_ : Any=True ,A_ : List[str]=False ,A_ : Tuple=10 ,A_ : List[Any]=3 ,A_ : Any=32 * 8 ,A_ : Dict=32 * 8 ,A_ : List[Any]=4 ,A_ : Tuple=64 ,) -> List[str]: A = parent A = batch_size A = is_training A = use_auxiliary_loss A = num_queries A = num_channels A = min_size A = max_size A = num_labels A = hidden_dim A = hidden_dim def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]: A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( A_ ) A = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=A_ ) A = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=A_ ) > 0.5 ).float() A = (torch.rand((self.batch_size, self.num_labels) ,device=A_ ) > 0.5).long() A = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]: A = MaskaFormerConfig( hidden_size=self.hidden_dim ,) A = self.num_queries A = self.num_labels A = [1, 1, 1, 1] A = self.num_channels A = 64 A = 128 A = self.hidden_dim A = self.hidden_dim A = self.hidden_dim return config def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A , A , A , A , A = self.prepare_config_and_inputs() A = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask} return config, inputs_dict def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]: A = output.encoder_hidden_states A = output.pixel_decoder_hidden_states A = output.transformer_decoder_hidden_states self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(A_ ) ,config.decoder_layers ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : List[str] ,A_ : Union[str, Any]=False ) -> str: with torch.no_grad(): A = MaskaFormerModel(config=A_ ) model.to(A_ ) model.eval() A = model(pixel_values=A_ ,pixel_mask=A_ ) A = model(A_ ,output_hidden_states=A_ ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : Any ,A_ : Dict ,A_ : Any ,A_ : Dict ) -> Optional[Any]: A = MaskaFormerForUniversalSegmentation(config=A_ ) model.to(A_ ) model.eval() def 
comm_check_on_output(A_ : str ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): A = model(pixel_values=A_ ,pixel_mask=A_ ) A = model(A_ ) comm_check_on_output(A_ ) A = model( pixel_values=A_ ,pixel_mask=A_ ,mask_labels=A_ ,class_labels=A_ ) comm_check_on_output(A_ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) ) @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Union[str, Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () _lowerCamelCase: Optional[Any] = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {} _lowerCamelCase: int = False _lowerCamelCase: Dict = False _lowerCamelCase: List[str] = False _lowerCamelCase: int = False def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict: A = MaskaFormerModelTester(self ) A = ConfigTester(self ,config_class=A_ ,has_text_modality=A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A , A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*A_ ) @unittest.skip(reason='Mask2Former does not use inputs_embeds' ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: pass @unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: pass @unittest.skip(reason='Mask2Former is not a generative model' ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: pass @unittest.skip(reason='Mask2Former does not use token embeddings' ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: pass @require_torch_multi_gpu @unittest.skip( reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: pass def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ) A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A = [*signature.parameters.keys()] A = ['pixel_values'] self.assertListEqual(arg_names[:1] ,A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: for model_name in ["facebook/mask2former-swin-small-coco-instance"]: A = MaskaFormerModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = (self.model_tester.min_size,) * 2 A = { 'pixel_values': torch.randn((2, 3, *size) ,device=A_ ), 'mask_labels': torch.randn((2, 10, *size) ,device=A_ ), 'class_labels': torch.zeros(2 ,10 ,device=A_ ).long(), } A = self.model_tester.get_config() A = MaskaFormerForUniversalSegmentation(A_ ).to(A_ ) A = model(**A_ ) self.assertTrue(outputs.loss is not None ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ).to(A_ ) A = model(**A_ ,output_attentions=A_ ) self.assertTrue(outputs.attentions is not None ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: if not self.model_tester.is_training: return A = self.all_model_classes[1] A , A , A , A , A = self.model_tester.prepare_config_and_inputs() A = model_class(A_ ) model.to(A_ ) model.train() A = model(A_ ,mask_labels=A_ ,class_labels=A_ ).loss loss.backward() def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = self.all_model_classes[1] A , A , A , A , A = self.model_tester.prepare_config_and_inputs() A = True A = True A = model_class(A_ ).to(A_ ) model.train() A = model(A_ ,mask_labels=A_ ,class_labels=A_ ) A = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() A = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() A = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() A = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=A_ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) _lowercase = 1e-4 def _snake_case ( ): A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_vision @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: return "facebook/mask2former-swin-small-coco-instance" @cached_property def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: A = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(A_ ) A = self.default_image_processor A = prepare_img() A = image_processor(A_ ,return_tensors='pt' ).to(A_ ) A = inputs['pixel_values'].shape # check size is divisible by 32 
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(A_ ,(1, 3, 384, 384) ) with torch.no_grad(): A = model(**A_ ) A = torch.tensor( [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(A_ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) ) A = torch.tensor( [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(A_ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) ) A = torch.tensor( [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(A_ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,A_ ,atol=A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval() A = self.default_image_processor A = prepare_img() A = image_processor(A_ ,return_tensors='pt' ).to(A_ ) A = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(A_ ,(1, 3, 384, 384) ) with torch.no_grad(): A = model(**A_ ) # masks_queries_logits A = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) A = [ [-8.78_39, -9.00_56, -8.81_21], [-7.41_04, -7.03_13, -6.54_01], [-6.61_05, -6.34_27, -6.46_75], ] A = torch.tensor(A_ ).to(A_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,A_ ,atol=A_ ) ) # class_queries_logits A = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) ) A = torch.tensor( [ [1.83_24, -8.08_35, -4.19_22], [0.84_50, -9.00_50, -3.60_53], [0.30_45, -7.72_93, -3.02_75], ] ).to(A_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,A_ ,atol=A_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval() A = self.default_image_processor A = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors='pt' ,) A = inputs['pixel_values'].to(A_ ) A = [el.to(A_ ) for el in inputs['mask_labels']] A = [el.to(A_ ) for el in inputs['class_labels']] with torch.no_grad(): A = model(**A_ ) self.assertTrue(outputs.loss is not None )
style_context_codestyle: 22
label: 0

code:
MOD_ADLER = 65521


def adler_32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of a string."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
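For reference (editor's sketch): the string "Wikipedia" is the standard Adler-32 test vector, with checksum 0x11E60398, and zlib's built-in can cross-check the pure-Python version.

import zlib

assert adler_32("Wikipedia") == 0x11E60398
assert adler_32("Wikipedia") == zlib.adler32(b"Wikipedia")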
code_codestyle: 714

style_context:
"""simple docstring""" from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] ,A_ : Optional[Any] ,) -> Optional[int]: A = parent A = 13 A = 7 A = True A = True A = True A = 99 A = 32 A = 2 A = 4 A = 37 A = 'gelu' A = 0.1 A = 0.1 A = 512 A = 16 A = 2 A = 0.02 A = 3 A = 4 A = None def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = None if self.use_input_mask: A = random_attention_mask([self.batch_size, self.seq_length] ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A = ids_tensor([self.batch_size] ,self.num_choices ) A = EsmConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,pad_token_id=1 ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = self.prepare_config_and_inputs() A = True A = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : List[Any] ,A_ : Any ,A_ : Any ) -> Dict: A = TFEsmModel(config=A_ ) A = {'input_ids': input_ids, 'attention_mask': input_mask} A = model(A_ ) A = [input_ids, input_mask] A = model(A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : List[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,) -> Optional[int]: A = True A = TFEsmModel(config=A_ ) A = { 'input_ids': input_ids, 'attention_mask': input_mask, 'encoder_hidden_states': encoder_hidden_states, 'encoder_attention_mask': encoder_attention_mask, } A = model(A_ ) A = [input_ids, input_mask] A = model(A_ ,encoder_hidden_states=A_ ) # Also check the case where encoder outputs are not passed A = model(A_ ,attention_mask=A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[Any] 
,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[Any] ) -> Dict: A = TFEsmForMaskedLM(config=A_ ) A = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[Any] ,A_ : int ,A_ : Tuple ,A_ : Optional[int] ) -> Union[str, Any]: A = self.num_labels A = TFEsmForTokenClassification(config=A_ ) A = {'input_ids': input_ids, 'attention_mask': input_mask} A = model(A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) _lowerCamelCase: List[str] = ( { '''feature-extraction''': TFEsmModel, '''fill-mask''': TFEsmForMaskedLM, '''text-classification''': TFEsmForSequenceClassification, '''token-classification''': TFEsmForTokenClassification, '''zero-shot''': TFEsmForSequenceClassification, } if is_tf_available() else {} ) _lowerCamelCase: Union[str, Any] = False _lowerCamelCase: List[Any] = False def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: A = TFEsmModelTester(self ) A = ConfigTester(self ,config_class=A_ ,hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = TFEsmModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) @unittest.skip('Protein models do not support embedding resizing.' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: pass @unittest.skip('Protein models do not support embedding resizing.' 
) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any: pass def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ) assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer A = model.get_bias() assert isinstance(A_ ,A_ ) for k, v in name.items(): assert isinstance(A_ ,tf.Variable ) else: A = model.get_output_embeddings() assert x is None A = model.get_bias() assert name is None @require_tf class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) A = tf.constant([[0, 1, 2, 3, 4, 5]] ) A = model(A_ )[0] A = [1, 6, 33] self.assertEqual(list(output.numpy().shape ) ,A_ ) # compare the actual values for a slice. A = tf.constant( [ [ [8.92_15_18, -10.58_98_14, -6.4_67_13_07], [-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15], [-7.78_12_47, -13.95_15_57, -3.74_05_92], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-2 ) ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: A = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) A = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) A = model(A_ )[0] # compare the actual values for a slice. A = tf.constant( [ [ [0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39], [0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22], [0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
style_context_codestyle: 22
label: 0

code:
"""simple docstring""" from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
code_codestyle: 715

style_context:
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys _lowercase = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''') _lowercase = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('''utf-8''').split() _lowercase = '''|'''.join(sys.argv[1:]) _lowercase = re.compile(rF"""^({joined_dirs}).*?\.py$""") _lowercase = [x for x in modified_files if regex.match(x)] print(''' '''.join(relevant_modified_files), end='''''')
style_context_codestyle: 22
label: 0

code:
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: A = tempfile.mkdtemp() # fmt: off A = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] A = {'unk_token': '<unk>'} A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(A_ ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(A_ ) ) A = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], 'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], } A = os.path.join(self.tmpdirname ,A_ ) with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp: json.dump(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,**A_ : List[str] ) -> List[Any]: return CLIPTokenizer.from_pretrained(self.tmpdirname ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,**A_ : int ) -> Union[str, Any]: return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,**A_ : Optional[int] ) -> List[Any]: return ViTImageProcessor.from_pretrained(self.tmpdirname ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple: shutil.rmtree(self.tmpdirname ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: A = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] A = [Image.fromarray(np.moveaxis(A_ ,0 ,-1 ) ) for x in image_inputs] return image_inputs def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: A = self.get_tokenizer() A = self.get_rust_tokenizer() A = self.get_image_processor() A = CLIPSegProcessor(tokenizer=A_ ,image_processor=A_ ) processor_slow.save_pretrained(self.tmpdirname ) A = CLIPSegProcessor.from_pretrained(self.tmpdirname ,use_fast=A_ ) A = CLIPSegProcessor(tokenizer=A_ ,image_processor=A_ ) processor_fast.save_pretrained(self.tmpdirname ) A = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,A_ ) self.assertIsInstance(processor_fast.tokenizer ,A_ ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,A_ ) self.assertIsInstance(processor_fast.image_processor ,A_ ) def 
_SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: A = CLIPSegProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' ) A = self.get_image_processor(do_normalize=A_ ,padding_value=1.0 ) A = CLIPSegProcessor.from_pretrained( self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=A_ ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,A_ ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: A = self.get_image_processor() A = self.get_tokenizer() A = CLIPSegProcessor(tokenizer=A_ ,image_processor=A_ ) A = self.prepare_image_inputs() A = image_processor(A_ ,return_tensors='np' ) A = processor(images=A_ ,return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: A = self.get_image_processor() A = self.get_tokenizer() A = CLIPSegProcessor(tokenizer=A_ ,image_processor=A_ ) A = 'lower newer' A = processor(text=A_ ) A = tokenizer(A_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]: A = self.get_image_processor() A = self.get_tokenizer() A = CLIPSegProcessor(tokenizer=A_ ,image_processor=A_ ) A = 'lower newer' A = self.prepare_image_inputs() A = processor(text=A_ ,images=A_ ) self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(A_ ): processor() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: A = self.get_image_processor() A = self.get_tokenizer() A = CLIPSegProcessor(tokenizer=A_ ,image_processor=A_ ) A = self.prepare_image_inputs() A = self.prepare_image_inputs() A = processor(images=A_ ,visual_prompt=A_ ) self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'conditional_pixel_values'] ) # test if it raises when no input is passed with pytest.raises(A_ ): processor() def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: A = self.get_image_processor() A = self.get_tokenizer() A = CLIPSegProcessor(tokenizer=A_ ,image_processor=A_ ) A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A = processor.batch_decode(A_ ) A = tokenizer.batch_decode(A_ ) self.assertListEqual(A_ ,A_ )
code_codestyle: 716

style_context:
"""simple docstring""" import sys from collections import defaultdict class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] ) -> int: A = [] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : int ) -> Optional[int]: return self.node_position[vertex] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[Any] ,A_ : Any ) -> List[Any]: A = pos def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : str ,A_ : Dict ,A_ : List[str] ) -> str: if start > size // 2 - 1: return else: if 2 * start + 2 >= size: A = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: A = 2 * start + 1 else: A = 2 * start + 2 if heap[smallest_child] < heap[start]: A , A = heap[smallest_child], positions[smallest_child] A , A = ( heap[start], positions[start], ) A , A = temp, tempa A = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] ,self.get_position(positions[start] ) ) self.set_position(positions[start] ,A_ ) self.top_to_bottom(A_ ,A_ ,A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Optional[int] ,A_ : Dict ,A_ : str ,A_ : Union[str, Any] ) -> Dict: A = position[index] while index != 0: A = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: A = heap[parent] A = position[parent] self.set_position(position[parent] ,A_ ) else: A = val A = temp self.set_position(A_ ,A_ ) break A = parent else: A = val A = temp self.set_position(A_ ,0 ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple ,A_ : Dict ) -> Union[str, Any]: A = len(A_ ) // 2 - 1 for i in range(A_ ,-1 ,-1 ): self.top_to_bottom(A_ ,A_ ,len(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ,A_ : Dict ) -> Union[str, Any]: A = positions[0] A = sys.maxsize self.top_to_bottom(A_ ,0 ,len(A_ ) ,A_ ) return temp def _snake_case ( snake_case__ : Dict ): A = Heap() A = [0] * len(snake_case__ ) A = [-1] * len(snake_case__ ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph A = [] # Heap of Distance of vertices from their neighboring vertex A = [] for vertex in range(len(snake_case__ ) ): distance_tv.append(sys.maxsize ) positions.append(snake_case__ ) heap.node_position.append(snake_case__ ) A = [] A = 1 A = sys.maxsize for neighbor, distance in adjacency_list[0]: A = 0 A = distance heap.heapify(snake_case__ , snake_case__ ) for _ in range(1 , len(snake_case__ ) ): A = heap.delete_minimum(snake_case__ , snake_case__ ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) A = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(snake_case__ )] ): A = distance heap.bottom_to_top( snake_case__ , heap.get_position(snake_case__ ) , snake_case__ , snake_case__ ) A = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > _lowercase = int(input('''Enter number of edges: ''').strip()) _lowercase = defaultdict(list) for _ in range(edges_number): _lowercase = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
style_context_codestyle: 22
label: 0

code:
"""simple docstring""" import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node _lowercase = 4 _lowercase = 3 class lowerCAmelCase_ ( __lowerCAmelCase ): '''simple docstring''' pass def _snake_case ( snake_case__ : List[str] ): for shard in shards: for i in range(snake_case__ ): yield {"i": i, "shard": shard} def _snake_case ( ): A = int(os.environ['RANK'] ) A = int(os.environ['WORLD_SIZE'] ) A = ArgumentParser() parser.add_argument('--streaming' , type=snake_case__ ) parser.add_argument('--local_rank' , type=snake_case__ ) parser.add_argument('--num_workers' , type=snake_case__ , default=0 ) A = parser.parse_args() A = args.streaming A = args.num_workers A = {'''shards''': [F'shard_{shard_idx}' for shard_idx in range(snake_case__ )]} A = IterableDataset.from_generator(snake_case__ , gen_kwargs=snake_case__ ) if not streaming: A = Dataset.from_list(list(snake_case__ ) ) A = split_dataset_by_node(snake_case__ , rank=snake_case__ , world_size=snake_case__ ) A = torch.utils.data.DataLoader(snake_case__ , num_workers=snake_case__ ) A = NUM_SHARDS * NUM_ITEMS_PER_SHARD A = full_size // world_size expected_local_size += int(rank < (full_size % world_size) ) A = sum(1 for _ in dataloader ) if local_size != expected_local_size: raise FailedTestError(F'local_size {local_size} != expected_local_size {expected_local_size}' ) if __name__ == "__main__": main()
code_codestyle: 717

style_context:
"""simple docstring""" import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter _lowercase = True except ImportError: _lowercase = False _lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name def _snake_case ( snake_case__ : Namespace ): return AddNewModelCommand(args.testing , args.testing_file , path=args.path ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any: A = parser.add_parser('add-new-model' ) add_new_model_parser.add_argument('--testing' ,action='store_true' ,help='If in testing mode.' ) add_new_model_parser.add_argument('--testing_file' ,type=A_ ,help='Configuration file on which to run.' ) add_new_model_parser.add_argument( '--path' ,type=A_ ,help='Path to cookiecutter. Should only be used for testing purposes.' ) add_new_model_parser.set_defaults(func=A_ ) def __init__( self : Tuple ,A_ : bool ,A_ : str ,A_ : Tuple=None ,*A_ : List[str] ) -> Union[str, Any]: A = testing A = testing_file A = path def _SCREAMING_SNAKE_CASE ( self : int ) -> int: warnings.warn( 'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. ' 'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality ' 'checks, you should use `transformers-cli add-new-model-like` instead.' ) if not _has_cookiecutter: raise ImportError( 'Model creation dependencies are required to use the `add_new_model` command. Install them by running ' 'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' ) # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory A = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]] if len(A_ ) > 0: raise ValueError( 'Several directories starting with `cookiecutter-template-` in current working directory. ' 'Please clean your directory by removing all folders starting with `cookiecutter-template-` or ' 'change your working directory.' 
) A = ( Path(A_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent ) A = path_to_transformer_root / 'templates' / 'adding_a_new_model' # Execute cookiecutter if not self._testing: cookiecutter(str(A_ ) ) else: with open(self._testing_file ,'r' ) as configuration_file: A = json.load(A_ ) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=A_ ,extra_context=A_ ,) A = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0] # Retrieve configuration with open(directory + '/configuration.json' ,'r' ) as configuration_file: A = json.load(A_ ) A = configuration['lowercase_modelname'] A = configuration['generate_tensorflow_pytorch_and_flax'] os.remove(F'{directory}/configuration.json' ) A = 'PyTorch' in generate_tensorflow_pytorch_and_flax A = 'TensorFlow' in generate_tensorflow_pytorch_and_flax A = 'Flax' in generate_tensorflow_pytorch_and_flax A = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}' os.makedirs(A_ ,exist_ok=A_ ) os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=A_ ) # Tests require submodules as they have parent imports with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,'w' ): pass shutil.move( F'{directory}/__init__.py' ,F'{model_dir}/__init__.py' ,) shutil.move( F'{directory}/configuration_{lowercase_model_name}.py' ,F'{model_dir}/configuration_{lowercase_model_name}.py' ,) def remove_copy_lines(A_ : int ): with open(A_ ,'r' ) as f: A = f.readlines() with open(A_ ,'w' ) as f: for line in lines: if "# Copied from transformers." not in line: f.write(A_ ) if output_pytorch: if not self._testing: remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' ) shutil.move( F'{directory}/modeling_{lowercase_model_name}.py' ,F'{model_dir}/modeling_{lowercase_model_name}.py' ,) shutil.move( F'{directory}/test_modeling_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,) else: os.remove(F'{directory}/modeling_{lowercase_model_name}.py' ) os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' ) if output_tensorflow: if not self._testing: remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' ) shutil.move( F'{directory}/modeling_tf_{lowercase_model_name}.py' ,F'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,) shutil.move( F'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,) else: os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' ) os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' ) if output_flax: if not self._testing: remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' ) shutil.move( F'{directory}/modeling_flax_{lowercase_model_name}.py' ,F'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,) shutil.move( F'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,) else: os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' ) os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' ) shutil.move( F'{directory}/{lowercase_model_name}.md' ,F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,) shutil.move( 
F'{directory}/tokenization_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}.py' ,) shutil.move( F'{directory}/tokenization_fast_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(A_ : str ,A_ : str ,A_ : List[str] ): # Create temp file A , A = mkstemp() A = False with fdopen(A_ ,'w' ) as new_file: with open(A_ ) as old_file: for line in old_file: new_file.write(A_ ) if line_to_copy_below in line: A = True for line_to_copy in lines_to_copy: new_file.write(A_ ) if not line_found: raise ValueError(F'Line {line_to_copy_below} was not found in file.' ) # Copy the file permissions from the old file to the new file copymode(A_ ,A_ ) # Remove original file remove(A_ ) # Move new file move(A_ ,A_ ) def skip_units(A_ : Dict ): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(A_ : Tuple ): with open(A_ ) as datafile: A = [] A = False A = False for line in datafile: if "# To replace in: " in line and "##" not in line: A = line.split('"' )[1] A = skip_units(A_ ) elif "# Below: " in line and "##" not in line: A = line.split('"' )[1] A = skip_units(A_ ) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(A_ ,A_ ,A_ ) A = [] elif "# Replace with" in line and "##" not in line: A = [] elif "##" not in line: lines_to_copy.append(A_ ) remove(A_ ) replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' ) os.rmdir(A_ )
style_context_codestyle: 22
label: 0

code:
"""simple docstring""" import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class lowerCAmelCase_ ( UpperCamelCase_ ): '''simple docstring''' _lowerCamelCase: str = (DEISMultistepScheduler,) _lowerCamelCase: Tuple = (('''num_inference_steps''', 25),) def _SCREAMING_SNAKE_CASE ( self : Dict ,**A_ : Optional[Any] ) -> List[str]: A = { '''num_train_timesteps''': 1000, '''beta_start''': 0.00_01, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''solver_order''': 2, } config.update(**A_ ) return config def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : str=0 ,**A_ : int ) -> Optional[Any]: A = dict(self.forward_default_kwargs ) A = kwargs.pop('num_inference_steps' ,A_ ) A = self.dummy_sample A = 0.1 * sample A = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A = self.get_scheduler_config(**A_ ) A = scheduler_class(**A_ ) scheduler.set_timesteps(A_ ) # copy over dummy past residuals A = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(A_ ) A = scheduler_class.from_pretrained(A_ ) new_scheduler.set_timesteps(A_ ) # copy over dummy past residuals A = dummy_past_residuals[: new_scheduler.config.solver_order] A = sample, sample for t in range(A_ ,time_step + scheduler.config.solver_order + 1 ): A = scheduler.step(A_ ,A_ ,A_ ,**A_ ).prev_sample A = new_scheduler.step(A_ ,A_ ,A_ ,**A_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def _SCREAMING_SNAKE_CASE ( self : str ) -> int: pass def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : str=0 ,**A_ : List[str] ) -> List[str]: A = dict(self.forward_default_kwargs ) A = kwargs.pop('num_inference_steps' ,A_ ) A = self.dummy_sample A = 0.1 * sample A = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A = self.get_scheduler_config() A = scheduler_class(**A_ ) scheduler.set_timesteps(A_ ) # copy over dummy past residuals (must be after setting timesteps) A = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(A_ ) A = scheduler_class.from_pretrained(A_ ) # copy over dummy past residuals new_scheduler.set_timesteps(A_ ) # copy over dummy past residual (must be after setting timesteps) A = dummy_past_residuals[: new_scheduler.config.solver_order] A = scheduler.step(A_ ,A_ ,A_ ,**A_ ).prev_sample A = new_scheduler.step(A_ ,A_ ,A_ ,**A_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[str]=None ,**A_ : List[str] ) -> Any: if scheduler is None: A = self.scheduler_classes[0] A = self.get_scheduler_config(**A_ ) A = scheduler_class(**A_ ) A = self.scheduler_classes[0] A = self.get_scheduler_config(**A_ ) A = scheduler_class(**A_ ) A = 10 A = self.dummy_model() A = self.dummy_sample_deter scheduler.set_timesteps(A_ ) for i, t in enumerate(scheduler.timesteps ): A = model(A_ ,A_ ) A = scheduler.step(A_ ,A_ ,A_ ).prev_sample return sample def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: A = dict(self.forward_default_kwargs ) A = kwargs.pop('num_inference_steps' ,A_ ) for scheduler_class in self.scheduler_classes: A = self.get_scheduler_config() A = scheduler_class(**A_ 
) A = self.dummy_sample A = 0.1 * sample if num_inference_steps is not None and hasattr(A_ ,'set_timesteps' ): scheduler.set_timesteps(A_ ) elif num_inference_steps is not None and not hasattr(A_ ,'set_timesteps' ): A = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A = [residual + 0.2, residual + 0.15, residual + 0.10] A = dummy_past_residuals[: scheduler.config.solver_order] A = scheduler.timesteps[5] A = scheduler.timesteps[6] A = scheduler.step(A_ ,A_ ,A_ ,**A_ ).prev_sample A = scheduler.step(A_ ,A_ ,A_ ,**A_ ).prev_sample self.assertEqual(output_a.shape ,sample.shape ) self.assertEqual(output_a.shape ,output_a.shape ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]: # make sure that iterating over schedulers with same config names gives same results # for defaults A = DEISMultistepScheduler(**self.get_scheduler_config() ) A = self.full_loop(scheduler=A_ ) A = torch.mean(torch.abs(A_ ) ) assert abs(result_mean.item() - 0.2_39_16 ) < 1e-3 A = DPMSolverSinglestepScheduler.from_config(scheduler.config ) A = DPMSolverMultistepScheduler.from_config(scheduler.config ) A = UniPCMultistepScheduler.from_config(scheduler.config ) A = DEISMultistepScheduler.from_config(scheduler.config ) A = self.full_loop(scheduler=A_ ) A = torch.mean(torch.abs(A_ ) ) assert abs(result_mean.item() - 0.2_39_16 ) < 1e-3 def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: self.check_over_configs(thresholding=A_ ) for order in [1, 2, 3]: for solver_type in ["logrho"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=A_ ,prediction_type=A_ ,sample_max_value=A_ ,algorithm_type='deis' ,solver_order=A_ ,solver_type=A_ ,) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: for algorithm_type in ["deis"]: for solver_type in ["logrho"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=A_ ,solver_type=A_ ,prediction_type=A_ ,algorithm_type=A_ ,) A = self.full_loop( solver_order=A_ ,solver_type=A_ ,prediction_type=A_ ,algorithm_type=A_ ,) assert not torch.isnan(A_ ).any(), "Samples have nan numbers" def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: self.check_over_configs(lower_order_final=A_ ) self.check_over_configs(lower_order_final=A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> int: for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=A_ ,time_step=0 ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: A = self.full_loop() A = torch.mean(torch.abs(A_ ) ) assert abs(result_mean.item() - 0.2_39_16 ) < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: A = self.full_loop(prediction_type='v_prediction' ) A = torch.mean(torch.abs(A_ ) ) assert abs(result_mean.item() - 0.0_91 ) < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: A = self.scheduler_classes[0] A = self.get_scheduler_config(thresholding=A_ ,dynamic_thresholding_ratio=0 ) A = scheduler_class(**A_ ) A = 10 A = self.dummy_model() A = self.dummy_sample_deter.half() scheduler.set_timesteps(A_ ) for i, t in enumerate(scheduler.timesteps ): A = model(A_ ,A_ ) A = scheduler.step(A_ 
,A_ ,A_ ).prev_sample assert sample.dtype == torch.floataa
code_codestyle: 718

style_context:
"""simple docstring""" import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : int ,A_ : Tuple ,A_ : str=7 ,A_ : Tuple=3 ,A_ : List[Any]=18 ,A_ : List[str]=30 ,A_ : Optional[Any]=400 ,A_ : Any=True ,A_ : Optional[Any]=None ,A_ : List[str]=True ,) -> str: A = size if size is not None else {'height': 18, 'width': 18} A = parent A = batch_size A = num_channels A = image_size A = min_resolution A = max_resolution A = do_resize A = size A = do_normalize def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04], [-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[Any] = ImageGPTImageProcessor if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = ImageGPTImageProcessingTester(self ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ ,'clusters' ) ) self.assertTrue(hasattr(A_ ,'do_resize' ) ) self.assertTrue(hasattr(A_ ,'size' ) ) self.assertTrue(hasattr(A_ ,'do_normalize' ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} ) A = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ) self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: A = self.image_processing_class(**self.image_processor_dict ) A = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(A_ ,obj[key] ) ) else: self.assertEqual(obj[key] ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: A = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A = os.path.join(A_ ,'image_processor.json' ) image_processor_first.to_json_file(A_ ) A = self.image_processing_class.from_json_file(A_ ).to_dict() A = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: A = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(A_ ) A = 
self.image_processing_class.from_pretrained(A_ ).to_dict() A = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] ,A_ ) @unittest.skip('ImageGPT requires clusters at initialization' ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: pass def _snake_case ( ): A = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' ) A = Image.open(dataset[4]['file'] ) A = Image.open(dataset[5]['file'] ) A = [imagea, imagea] return images @require_vision @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : str ) -> int: A = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' ) A = prepare_images() # test non-batched A = image_processing(images[0] ,return_tensors='pt' ) self.assertIsInstance(encoding.input_ids ,torch.LongTensor ) self.assertEqual(encoding.input_ids.shape ,(1, 1024) ) A = [306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist() ,A_ ) # test batched A = image_processing(A_ ,return_tensors='pt' ) self.assertIsInstance(encoding.input_ids ,torch.LongTensor ) self.assertEqual(encoding.input_ids.shape ,(2, 1024) ) A = [303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() ,A_ )
style_context_codestyle: 22
label: 0

code:
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase = { """configuration_trajectory_transformer""": [ """TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrajectoryTransformerConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ """TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrajectoryTransformerModel""", """TrajectoryTransformerPreTrainedModel""", """load_tf_weights_in_trajectory_transformer""", ] if TYPE_CHECKING: from .configuration_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TrajectoryTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel, load_tf_weights_in_trajectory_transformer, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
code_codestyle: 719

style_context:
"""simple docstring""" from argparse import ArgumentParser from . import BaseTransformersCLICommand def _snake_case ( snake_case__ : Optional[int] ): return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any: A = parser.add_parser('download' ) download_parser.add_argument( '--cache-dir' ,type=A_ ,default=A_ ,help='Path to location to store the models' ) download_parser.add_argument( '--force' ,action='store_true' ,help='Force the model to be download even if already in cache-dir' ) download_parser.add_argument( '--trust-remote-code' ,action='store_true' ,help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' ,) download_parser.add_argument('model' ,type=A_ ,help='Name of the model to download' ) download_parser.set_defaults(func=A_ ) def __init__( self : Dict ,A_ : str ,A_ : str ,A_ : bool ,A_ : bool ) -> Union[str, Any]: A = model A = cache A = force A = trust_remote_code def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: from ..models.auto import AutoModel, AutoTokenizer AutoModel.from_pretrained( self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code ) AutoTokenizer.from_pretrained( self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
22
0
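The _import_structure / _LazyModule pattern above defers importing submodules until a symbol is first accessed. A minimal, self-contained sketch of that mechanism; the class name and the stdlib stand-in modules are illustrative, not the transformers implementation:

import importlib
import types

class _Lazy(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {module: [symbols]} into {symbol: module} for O(1) lookup
        self._where = {s: m for m, syms in import_structure.items() for s in syms}

    def __getattr__(self, attr):  # only called when the attribute is not cached yet
        if attr not in self._where:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(self._where[attr]), attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value

lazy = _Lazy("demo", {"json": ["dumps"], "math": ["sqrt"]})
assert lazy.sqrt(9) == 3.0  # math is imported here, on first use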
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''edbeeching/decision-transformer-gym-hopper-medium''': ( '''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json''' ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Tuple = """decision_transformer""" _lowerCamelCase: Tuple = ["""past_key_values"""] _lowerCamelCase: Dict = { """max_position_embeddings""": """n_positions""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : Optional[Any] ,A_ : int=17 ,A_ : int=4 ,A_ : Union[str, Any]=128 ,A_ : Optional[Any]=4096 ,A_ : List[Any]=True ,A_ : str=1 ,A_ : str=1024 ,A_ : List[str]=3 ,A_ : str=1 ,A_ : Any=None ,A_ : Optional[int]="relu" ,A_ : List[Any]=0.1 ,A_ : str=0.1 ,A_ : Tuple=0.1 ,A_ : Union[str, Any]=1e-5 ,A_ : Optional[int]=0.02 ,A_ : List[Any]=True ,A_ : Tuple=True ,A_ : Optional[Any]=5_0256 ,A_ : List[Any]=5_0256 ,A_ : List[str]=False ,A_ : Any=False ,**A_ : List[str] ,) -> List[Any]: A = state_dim A = act_dim A = hidden_size A = max_ep_len A = action_tanh A = vocab_size A = n_positions A = n_layer A = n_head A = n_inner A = activation_function A = resid_pdrop A = embd_pdrop A = attn_pdrop A = layer_norm_epsilon A = initializer_range A = scale_attn_weights A = use_cache A = scale_attn_by_inverse_layer_idx A = reorder_and_upcast_attn A = bos_token_id A = eos_token_id super().__init__(bos_token_id=A_ ,eos_token_id=A_ ,**A_ )
720
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = {'''vocab_file''': '''spm_char.model'''} _lowercase = { '''vocab_file''': { '''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''', '''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''', '''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''', } } _lowercase = { '''microsoft/speecht5_asr''': 10_24, '''microsoft/speecht5_tts''': 10_24, '''microsoft/speecht5_vc''': 10_24, } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = VOCAB_FILES_NAMES _lowerCamelCase: List[Any] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase: Tuple = ['''input_ids''', '''attention_mask'''] def __init__( self : List[str] ,A_ : int ,A_ : List[str]="<s>" ,A_ : Optional[Any]="</s>" ,A_ : Optional[Any]="<unk>" ,A_ : str="<pad>" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : List[str] ,) -> None: A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,pad_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,) A = vocab_file A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A_ ) @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: return self.sp_model.get_piece_size() def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : str ) -> Any: A = self.__dict__.copy() A = None return state def __setstate__( self : Optional[int] ,A_ : str ) -> Tuple: A = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): A = {} A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[str]: return self.sp_model.encode(A_ ,out_type=A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any] ) -> Union[str, Any]: return self.sp_model.piece_to_id(A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict ) -> List[Any]: A = self.sp_model.IdToPiece(A_ ) return token def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Optional[Any] ) -> List[str]: A = [] A = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(A_ ) + token A = [] else: current_sub_tokens.append(A_ ) out_string += self.sp_model.decode(A_ ) return out_string.strip() def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : Optional[int]=None ) -> List[int]: if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ ) A = [1] if token_ids_a is None: return ([0] * 
len(A_ )) + suffix_ones return ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(A_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A = os.path.join( A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ ,'wb' ) as fi: A = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,)
22
0
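The SpeechT5 tokenizer above suffixes every sequence with a single EOS token and builds its special-tokens mask to flag only that suffix. A plain-Python restatement of that logic (eos_id=2 is an arbitrary placeholder, not the real vocabulary id):

def build_inputs(ids_a, ids_b=None, eos_id=2):
    # mirrors build_inputs_with_special_tokens: append one EOS at the end
    return ids_a + [eos_id] if ids_b is None else ids_a + ids_b + [eos_id]

def special_tokens_mask(ids_a, ids_b=None):
    suffix_ones = [1]  # flags the trailing EOS only
    if ids_b is None:
        return [0] * len(ids_a) + suffix_ones
    return [0] * len(ids_a) + [0] * len(ids_b) + suffix_ones

assert build_inputs([5, 6, 7]) == [5, 6, 7, 2]
assert special_tokens_mask([5, 6], [7]) == [0, 0, 0, 1]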
"""simple docstring""" import os # Precomputes a list of the 100 first triangular numbers _lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)] def _snake_case ( ): A = os.path.dirname(os.path.realpath(snake_case__ ) ) A = os.path.join(snake_case__ , 'words.txt' ) A = '' with open(snake_case__ ) as f: A = f.readline() A = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )] A = [ word for word in [sum(ord(snake_case__ ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(snake_case__ ) if __name__ == "__main__": print(solution())
721
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _lowercase = { '''configuration_clip''': [ '''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPConfig''', '''CLIPOnnxConfig''', '''CLIPTextConfig''', '''CLIPVisionConfig''', ], '''processing_clip''': ['''CLIPProcessor'''], '''tokenization_clip''': ['''CLIPTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''CLIPTokenizerFast'''] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''CLIPFeatureExtractor'''] _lowercase = ['''CLIPImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPModel''', '''CLIPPreTrainedModel''', '''CLIPTextModel''', '''CLIPTextModelWithProjection''', '''CLIPVisionModel''', '''CLIPVisionModelWithProjection''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFCLIPModel''', '''TFCLIPPreTrainedModel''', '''TFCLIPTextModel''', '''TFCLIPVisionModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''FlaxCLIPModel''', '''FlaxCLIPPreTrainedModel''', '''FlaxCLIPTextModel''', '''FlaxCLIPTextPreTrainedModel''', '''FlaxCLIPVisionModel''', '''FlaxCLIPVisionPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
0
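A quick hand check of the word-value rule in the Project Euler solution above, where a letter scores ord(letter) - 64 (A=1 ... Z=26):

# "SKY" -> S + K + Y = 19 + 11 + 25 = 55, and 55 = 10 * 11 / 2 is the
# 10th triangular number, so "SKY" counts as a triangle word.
assert sum(ord(c) - 64 for c in "SKY") == 55
assert 55 in [n * (n + 1) // 2 for n in range(1, 101)]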
"""simple docstring""" from __future__ import annotations from collections.abc import Callable def _snake_case ( snake_case__ : Callable[[int | float], int | float] , snake_case__ : int | float , snake_case__ : int | float , snake_case__ : int = 100 , ): A = x_start A = fnc(snake_case__ ) A = 0.0 for _ in range(snake_case__ ): # Approximates small segments of curve as linear and solve # for trapezoidal area A = (x_end - x_start) / steps + xa A = fnc(snake_case__ ) area += abs(fxa + fxa ) * (xa - xa) / 2 # Increment step A = xa A = fxa return area if __name__ == "__main__": def _snake_case ( snake_case__ : Tuple ): return x**3 + x**2 print('''f(x) = x^3 + x^2''') print('''The area between the curve, x = -5, x = 5 and the x axis is:''') _lowercase = 10 while i <= 10_00_00: print(F"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""") i *= 10
700
"""simple docstring""" import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Any ,A_ : Optional[Any] ,A_ : Tuple=13 ,A_ : Optional[Any]=7 ,A_ : Dict=True ,A_ : Optional[Any]=True ,A_ : str=True ,A_ : Union[str, Any]=True ,A_ : Optional[Any]=True ,A_ : Tuple=False ,A_ : Optional[int]=False ,A_ : str=False ,A_ : int=2 ,A_ : Union[str, Any]=99 ,A_ : int=0 ,A_ : Dict=32 ,A_ : List[str]=5 ,A_ : Any=4 ,A_ : str=0.1 ,A_ : Any=0.1 ,A_ : int=512 ,A_ : List[Any]=2 ,A_ : Union[str, Any]=0.02 ,A_ : Optional[Any]=2 ,A_ : List[str]=4 ,A_ : Optional[int]="last" ,A_ : str=True ,A_ : List[str]=None ,A_ : List[Any]=0 ,) -> int: A = parent A = batch_size A = seq_length A = is_training A = use_input_lengths A = use_token_type_ids A = use_labels A = gelu_activation A = sinusoidal_embeddings A = causal A = asm A = n_langs A = vocab_size A = n_special A = hidden_size A = num_hidden_layers A = num_attention_heads A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_sequence_label_size A = initializer_range A = num_labels A = num_choices A = summary_type A = use_proj A = scope A = bos_token_id def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = random_attention_mask([self.batch_size, self.seq_length] ) A = None if self.use_input_lengths: A = ( ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length A = None if self.use_token_type_ids: A = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A = ids_tensor([self.batch_size] ,2 ).float() A = ids_tensor([self.batch_size] ,self.num_choices ) A = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: return XLMConfig( vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : int ,A_ : Dict ,A_ : Optional[Any] ,A_ : Optional[Any] ,A_ : Any 
,A_ : List[str] ,A_ : Optional[int] ,) -> Tuple: A = XLMModel(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,lengths=A_ ,langs=A_ ) A = model(A_ ,langs=A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ,A_ : Union[str, Any] ,A_ : Any ,A_ : Any ,A_ : Any ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : List[str] ,A_ : List[str] ,) -> Union[str, Any]: A = XLMWithLMHeadModel(A_ ) model.to(A_ ) model.eval() A = model(A_ ,token_type_ids=A_ ,labels=A_ ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple ,A_ : Tuple ,A_ : str ,A_ : int ,A_ : str ,A_ : Optional[Any] ,A_ : Any ,A_ : Any ,A_ : Dict ,) -> List[str]: A = XLMForQuestionAnsweringSimple(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model(A_ ,start_positions=A_ ,end_positions=A_ ) A = outputs self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[int] ,A_ : Dict ,A_ : Optional[Any] ,A_ : List[Any] ,A_ : List[str] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : str ,A_ : Any ,) -> Optional[int]: A = XLMForQuestionAnswering(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model( A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,p_mask=A_ ,) A = model( A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,) ((A) , ) = result_with_labels.to_tuple() A = model(A_ ,start_positions=A_ ,end_positions=A_ ) ((A) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape ,() ) self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : Union[str, Any] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : str ,) -> List[Any]: A = XLMForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model(A_ ,labels=A_ ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : str ,A_ : Tuple ,A_ : List[str] ,A_ : Dict ,A_ : Dict ,A_ : Union[str, Any] ,A_ : Dict ,A_ : Any ,) -> Any: A = self.num_labels A = XLMForTokenClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : str ,A_ : int ,A_ : Tuple ,A_ : List[Any] ,A_ : List[str] ,A_ : List[str] ,A_ : Optional[Any] ,A_ : int ,) -> Tuple: A = self.num_choices A = XLMForMultipleChoice(config=A_ ) model.to(A_ ) model.eval() A = 
input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = model( A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) _lowerCamelCase: Dict = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _lowerCamelCase: Optional[Any] = ( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Tuple ,A_ : Any ,A_ : str ) -> Tuple: if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[int] ,A_ : Tuple ,A_ : str=False ) -> Dict: A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": A = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=A_ ) A = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=A_ ) return inputs_dict def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: A = XLMModelTester(self ) A = ConfigTester(self ,config_class=A_ ,emb_dim=37 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : List[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : Union[str, Any] ,A_ : List[Any]=False ,A_ : Tuple=1 ) -> List[Any]: self.assertIsInstance(A_ ,A_ ) self.assertListEqual( [isinstance(A_ ,A_ ) for iter_attentions in attentions] ,[True] * len(A_ ) ) self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(A_ ): # adds PAD dummy token A = min_length + idx + 1 A = min_length + idx + 1 A = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : Dict ,A_ : Optional[Any] ,A_ : Dict ,A_ : Tuple=False ,A_ : Optional[Any]=1 ) -> List[str]: self.assertIsInstance(A_ ,A_ ) self.assertListEqual( [isinstance(A_ ,A_ ) for iter_hidden_states in hidden_states] ,[True] * len(A_ ) ,) self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(A_ ): # adds PAD dummy token A = min_length + idx + 1 A = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(A_ ) ,) pass @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = XLMModel.from_pretrained(A_ ) 
self.assertIsNotNone(A_ ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: A = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' ) model.to(A_ ) A = torch.tensor([[14, 447]] ,dtype=torch.long ,device=A_ ) # the president A = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference A = model.generate(A_ ,do_sample=A_ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,A_ )
22
0
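A sanity check on the trapezoidal demo above: because the routine accumulates abs(fxa + fxb), it estimates the unsigned area between f(x) = x^3 + x^2 and the x axis rather than the signed integral. Splitting the exact integral at the sign change x = -1 gives (1376 + 2376) / 12 = 938/3 ≈ 312.67, which the printed values approach as the step count grows. A compact re-derivation:

def area(fnc, a, b, steps=100_000):
    # same abs-of-sum trapezoid rule as the demo, written independently
    xs = [a + (b - a) * k / steps for k in range(steps + 1)]
    return sum(abs(fnc(xs[k]) + fnc(xs[k + 1])) * (xs[k + 1] - xs[k]) / 2 for k in range(steps))

assert abs(area(lambda x: x**3 + x**2, -5, 5) - 938 / 3) < 1e-2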
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version _lowercase = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') @dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: Optional[str] = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={'''help''': '''The column name of the images in the files.'''} ) _lowerCamelCase: Optional[str] = field(default=_lowercase , metadata={'''help''': '''A folder containing the training data.'''} ) _lowerCamelCase: Optional[str] = field(default=_lowercase , metadata={'''help''': '''A folder containing the validation data.'''} ) _lowerCamelCase: Optional[float] = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) _lowerCamelCase: Optional[int] = field( default=_lowercase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) _lowerCamelCase: Optional[int] = field( default=_lowercase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict: A = {} if self.train_dir is not None: A = self.train_dir if self.validation_dir is not None: A = self.validation_dir A = data_files if data_files else None @dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str = field( default=_lowercase , metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) } , ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. 
Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} ) _lowerCamelCase: str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) _lowerCamelCase: str = field(default=_lowercase , metadata={'''help''': '''Name or path of preprocessor config.'''} ) _lowerCamelCase: bool = field( default=_lowercase , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) _lowerCamelCase: float = field( default=0.75 , metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} ) _lowerCamelCase: bool = field( default=_lowercase , metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} ) @dataclass class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: float = field( default=1E-3 , metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} ) def _snake_case ( snake_case__ : Union[str, Any] ): A = torch.stack([example['pixel_values'] for example in examples] ) return {"pixel_values": pixel_values} def _snake_case ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. A = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. A , A , A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: A , A , A = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_mae' , snake_case__ , snake_case__ ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() A = training_args.get_process_log_level() logger.setLevel(snake_case__ ) transformers.utils.logging.set_verbosity(snake_case__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. 
A = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: A = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. ' 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. A = None if 'validation' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , snake_case__ ) and data_args.train_val_split > 0.0: A = ds['train'].train_test_split(data_args.train_val_split ) A = split['train'] A = split['test'] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. A = { 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name: A = ViTMAEConfig.from_pretrained(model_args.config_name , **snake_case__ ) elif model_args.model_name_or_path: A = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **snake_case__ ) else: A = ViTMAEConfig() logger.warning('You are instantiating a new config instance from scratch.' 
) if model_args.config_overrides is not None: logger.info(F'Overriding config: {model_args.config_overrides}' ) config.update_from_string(model_args.config_overrides ) logger.info(F'New config: {config}' ) # adapt config config.update( { 'mask_ratio': model_args.mask_ratio, 'norm_pix_loss': model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: A = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **snake_case__ ) elif model_args.model_name_or_path: A = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **snake_case__ ) else: A = ViTImageProcessor() # create model if model_args.model_name_or_path: A = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=snake_case__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) A = ViTMAEForPreTraining(snake_case__ ) if training_args.do_train: A = ds['train'].column_names else: A = ds['validation'].column_names if data_args.image_column_name is not None: A = data_args.image_column_name elif "image" in column_names: A = 'image' elif "img" in column_names: A = 'img' else: A = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: A = image_processor.size['shortest_edge'] else: A = (image_processor.size['height'], image_processor.size['width']) A = Compose( [ Lambda(lambda snake_case__ : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(snake_case__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(snake_case__ : Dict ): A = [transforms(snake_case__ ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: A = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(snake_case__ ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: A = ( ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(snake_case__ ) # Compute absolute learning rate A = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: A = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer A = Trainer( model=snake_case__ , args=snake_case__ , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=snake_case__ , data_collator=snake_case__ , ) # Training if training_args.do_train: A = None if training_args.resume_from_checkpoint is not None: A = training_args.resume_from_checkpoint elif last_checkpoint is not None: A = last_checkpoint A = trainer.train(resume_from_checkpoint=snake_case__ ) trainer.save_model() trainer.log_metrics('train' , 
train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: A = trainer.evaluate() trainer.log_metrics('eval' , snake_case__ ) trainer.save_metrics('eval' , snake_case__ ) # Write model card and (optionally) push to hub A = { 'tasks': 'masked-auto-encoding', 'dataset': data_args.dataset_name, 'tags': ['masked-auto-encoding'], } if training_args.push_to_hub: trainer.push_to_hub(**snake_case__ ) else: trainer.create_model_card(**snake_case__ ) def _snake_case ( snake_case__ : Optional[int] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
701
"""simple docstring""" from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput _lowercase = 8 def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int]=BITS ): A = x.device A = (x * 255).int().clamp(0 , 255 ) A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ ) A = rearrange(snake_case__ , 'd -> d 1 1' ) A = rearrange(snake_case__ , 'b c h w -> b c 1 h w' ) A = ((x & mask) != 0).float() A = rearrange(snake_case__ , 'b c d h w -> b (c d) h w' ) A = bits * 2 - 1 return bits def _snake_case ( snake_case__ : Any , snake_case__ : Any=BITS ): A = x.device A = (x > 0).int() A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ , dtype=torch.intaa ) A = rearrange(snake_case__ , 'd -> d 1 1' ) A = rearrange(snake_case__ , 'b (c d) h w -> b c d h w' , d=8 ) A = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' ) return (dec / 255).clamp(0.0 , 1.0 ) def _snake_case ( self : Optional[int] , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : float = 0.0 , snake_case__ : bool = True , snake_case__ : List[str]=None , snake_case__ : bool = True , ): if self.num_inference_steps is None: raise ValueError( 'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) A = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas A = self.alphas_cumprod[timestep] A = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod A = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" A = self.bit_scale if self.config.clip_sample: A = torch.clamp(snake_case__ , -scale , snake_case__ ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) A = self._get_variance(snake_case__ , snake_case__ ) A = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide A = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf A = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf A = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 A = model_output.device if torch.is_tensor(snake_case__ ) else 'cpu' A = torch.randn(model_output.shape , dtype=model_output.dtype , generator=snake_case__ ).to(snake_case__ ) A = self._get_variance(snake_case__ , snake_case__ ) ** 0.5 * eta * noise A = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ ) def _snake_case ( self : Dict , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : Tuple="epsilon" , snake_case__ : List[str]=None , snake_case__ : bool = True , ): A = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: A , A = torch.split(snake_case__ , sample.shape[1] , dim=1 ) else: A = None # 1. compute alphas, betas A = self.alphas_cumprod[t] A = self.alphas_cumprod[t - 1] if t > 0 else self.one A = 1 - alpha_prod_t A = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": A = model_output else: raise ValueError(F'Unsupported prediction_type {prediction_type}.' ) # 3. Clip "predicted x_0" A = self.bit_scale if self.config.clip_sample: A = torch.clamp(snake_case__ , -scale , snake_case__ ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t A = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise A = 0 if t > 0: A = torch.randn( model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=snake_case__ ).to(model_output.device ) A = (self._get_variance(snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise A = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Union[str, Any] ,A_ : UNetaDConditionModel ,A_ : Union[DDIMScheduler, DDPMScheduler] ,A_ : Optional[float] = 1.0 ,) -> Optional[int]: super().__init__() A = bit_scale A = ( ddim_bit_scheduler_step if isinstance(A_ ,A_ ) else ddpm_bit_scheduler_step ) self.register_modules(unet=A_ ,scheduler=A_ ) @torch.no_grad() def __call__( self : Tuple ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 50 ,A_ : Optional[torch.Generator] = None ,A_ : Optional[int] = 1 ,A_ : Optional[str] = "pil" ,A_ : bool = True ,**A_ : Optional[Any] ,) -> Union[Tuple, ImagePipelineOutput]: A = torch.randn( (batch_size, self.unet.config.in_channels, height, width) ,generator=A_ ,) A = decimal_to_bits(A_ ) * self.bit_scale A = latents.to(self.device ) self.scheduler.set_timesteps(A_ ) for t in self.progress_bar(self.scheduler.timesteps ): # predict the noise residual A = self.unet(A_ ,A_ ).sample # compute the previous noisy sample x_t -> x_t-1 A = self.scheduler.step(A_ ,A_ ,A_ ).prev_sample A = bits_to_decimal(A_ ) if output_type == "pil": A = self.numpy_to_pil(A_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=A_ )
22
0
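The bit-diffusion pipeline above encodes each 0-255 channel value as eight ±1 "analog bits" and decodes by thresholding at zero. A self-contained numpy round-trip of that encoding (plain numpy standing in for the torch/einops version used in the pipeline):

import numpy as np

def to_bits(x: np.ndarray) -> np.ndarray:       # x: uint8 values
    b = np.unpackbits(x[..., None], axis=-1)    # (..., 8) bits in {0, 1}
    return b.astype(np.float32) * 2 - 1         # rescale to {-1, 1}

def from_bits(bits: np.ndarray) -> np.ndarray:  # bits: (..., 8) floats
    b = (bits > 0).astype(np.uint8)             # threshold at zero
    return np.packbits(b, axis=-1)[..., 0]

x = np.arange(256, dtype=np.uint8)
assert np.array_equal(from_bits(to_bits(x)), x)  # lossless round trip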
"""simple docstring""" from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: int = ['''image_processor''', '''tokenizer'''] _lowerCamelCase: Optional[int] = '''BlipImageProcessor''' _lowerCamelCase: Any = ('''BertTokenizer''', '''BertTokenizerFast''') def __init__( self : str ,A_ : str ,A_ : Any ) -> List[Any]: A = False super().__init__(A_ ,A_ ) A = self.image_processor def __call__( self : Dict ,A_ : ImageInput = None ,A_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,A_ : bool = True ,A_ : Union[bool, str, PaddingStrategy] = False ,A_ : Union[bool, str, TruncationStrategy] = None ,A_ : Optional[int] = None ,A_ : int = 0 ,A_ : Optional[int] = None ,A_ : Optional[bool] = None ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = True ,A_ : Optional[Union[str, TensorType]] = None ,**A_ : Tuple ,) -> BatchEncoding: if images is None and text is None: raise ValueError('You have to specify either images or text.' ) # Get only text if images is None: A = self.tokenizer A = self.tokenizer( text=A_ ,add_special_tokens=A_ ,padding=A_ ,truncation=A_ ,max_length=A_ ,stride=A_ ,pad_to_multiple_of=A_ ,return_attention_mask=A_ ,return_overflowing_tokens=A_ ,return_special_tokens_mask=A_ ,return_offsets_mapping=A_ ,return_token_type_ids=A_ ,return_length=A_ ,verbose=A_ ,return_tensors=A_ ,**A_ ,) return text_encoding # add pixel_values A = self.image_processor(A_ ,return_tensors=A_ ) if text is not None: A = self.tokenizer( text=A_ ,add_special_tokens=A_ ,padding=A_ ,truncation=A_ ,max_length=A_ ,stride=A_ ,pad_to_multiple_of=A_ ,return_attention_mask=A_ ,return_overflowing_tokens=A_ ,return_special_tokens_mask=A_ ,return_offsets_mapping=A_ ,return_token_type_ids=A_ ,return_length=A_ ,verbose=A_ ,return_tensors=A_ ,**A_ ,) else: A = None if text_encoding is not None: encoding_image_processor.update(A_ ) return encoding_image_processor def _SCREAMING_SNAKE_CASE ( self : int ,*A_ : Any ,**A_ : Optional[int] ) -> Union[str, Any]: return self.tokenizer.batch_decode(*A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,*A_ : str ,**A_ : Any ) -> Any: return self.tokenizer.decode(*A_ ,**A_ ) @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: A = self.tokenizer.model_input_names A = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
702
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = '''yolos''' def __init__( self : Dict ,A_ : Optional[Any]=768 ,A_ : int=12 ,A_ : List[str]=12 ,A_ : str=3072 ,A_ : Tuple="gelu" ,A_ : Dict=0.0 ,A_ : List[Any]=0.0 ,A_ : Any=0.02 ,A_ : str=1e-12 ,A_ : List[Any]=[512, 864] ,A_ : Union[str, Any]=16 ,A_ : List[str]=3 ,A_ : Optional[int]=True ,A_ : Tuple=100 ,A_ : str=True ,A_ : Optional[Any]=False ,A_ : Any=1 ,A_ : Optional[Any]=5 ,A_ : Optional[Any]=2 ,A_ : Optional[int]=5 ,A_ : List[Any]=2 ,A_ : Union[str, Any]=0.1 ,**A_ : Tuple ,) -> Any: super().__init__(**A_ ) A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = patch_size A = num_channels A = qkv_bias A = num_detection_tokens A = use_mid_position_embeddings A = auxiliary_loss # Hungarian matcher A = class_cost A = bbox_cost A = giou_cost # Loss coefficients A = bbox_loss_coefficient A = giou_loss_coefficient A = eos_coefficient class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Any = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> float: return 1e-4 @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: return 12
22
0
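The model_input_names property in the BLIP processor above merges the tokenizer's and the image processor's input names while preserving order and dropping duplicates; dict.fromkeys is the standard idiom for that:

tokenizer_names = ["input_ids", "attention_mask"]
image_names = ["pixel_values", "attention_mask"]
merged = list(dict.fromkeys(tokenizer_names + image_names))
assert merged == ["input_ids", "attention_mask", "pixel_values"]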
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''', # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: str = '''gptj''' _lowerCamelCase: Tuple = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : str ,A_ : Tuple=5_0400 ,A_ : Union[str, Any]=2048 ,A_ : Tuple=4096 ,A_ : Any=28 ,A_ : List[str]=16 ,A_ : Tuple=64 ,A_ : List[str]=None ,A_ : Optional[int]="gelu_new" ,A_ : Tuple=0.0 ,A_ : Union[str, Any]=0.0 ,A_ : Tuple=0.0 ,A_ : Optional[int]=1e-5 ,A_ : str=0.02 ,A_ : Optional[Any]=True ,A_ : Optional[Any]=5_0256 ,A_ : List[Any]=5_0256 ,A_ : Any=False ,**A_ : int ,) -> int: A = vocab_size A = n_positions A = n_embd A = n_layer A = n_head A = n_inner A = rotary_dim A = activation_function A = resid_pdrop A = embd_pdrop A = attn_pdrop A = layer_norm_epsilon A = initializer_range A = use_cache A = bos_token_id A = eos_token_id super().__init__( bos_token_id=A_ ,eos_token_id=A_ ,tie_word_embeddings=A_ ,**A_ ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Union[str, Any] ,A_ : PretrainedConfig ,A_ : str = "default" ,A_ : List[PatchingSpec] = None ,A_ : bool = False ,) -> Union[str, Any]: super().__init__(A_ ,task=A_ ,patching_specs=A_ ,use_past=A_ ) if not getattr(self._config ,'pad_token_id' ,A_ ): # TODO: how to do that better? A = 0 @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]: A = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(A_ ,direction='inputs' ) A = {0: 'batch', 1: 'past_sequence + sequence'} else: A = {0: 'batch', 1: 'sequence'} return common_inputs @property def _SCREAMING_SNAKE_CASE ( self : str ) -> int: return self._config.n_layer @property def _SCREAMING_SNAKE_CASE ( self : int ) -> int: return self._config.n_head def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : PreTrainedTokenizer ,A_ : int = -1 ,A_ : int = -1 ,A_ : bool = False ,A_ : Optional[TensorType] = None ,) -> Mapping[str, Any]: A = super(A_ ,self ).generate_dummy_inputs( A_ ,batch_size=A_ ,seq_length=A_ ,is_pair=A_ ,framework=A_ ) # We need to order the input in the way they appears in the forward() A = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' 
) else: import torch A , A = common_inputs['input_ids'].shape # Not using the same length for past_key_values A = seqlen + 2 A = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) A = [ (torch.zeros(A_ ), torch.zeros(A_ )) for _ in range(self.num_layers ) ] A = common_inputs['attention_mask'] if self.use_past: A = ordered_inputs['attention_mask'].dtype A = torch.cat( [ordered_inputs['attention_mask'], torch.ones(A_ ,A_ ,dtype=A_ )] ,dim=1 ) return ordered_inputs @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: return 13
703
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Tuple = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=0 ) -> Dict: A = floats_tensor((1, 3, 128, 128) ,rng=random.Random(A_ ) ) A = np.random.RandomState(A_ ) A = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.75, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) A = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=A_ ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) # warmup pass to apply optimizations A = pipe(**self.get_dummy_inputs() ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint 
,provider='CPUExecutionProvider' ) A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str: A = ort.SessionOptions() A = False return options def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) # using the PNDM scheduler by default A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) A = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' ) A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=A_ ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] ) # TODO: lower the tolerance 
after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
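For context, a standalone sketch of how the image-to-image pipeline exercised by the tests above is typically driven. The checkpoint, revision, provider, init image, prompt, and call arguments mirror the nightly test; only the import spelling differs (the diffusers class is OnnxStableDiffusionImg2ImgPipeline, rendered as OnnxStableDiffusionImgaImgPipeline in the listing):

import numpy as np
from diffusers import OnnxStableDiffusionImg2ImgPipeline
from diffusers.utils import load_image

# Same sketch input the nightly test downloads:
init_image = load_image(
    'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
    '/img2img/sketch-mountains-input.jpg'
).resize((768, 512))

pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
    'CompVis/stable-diffusion-v1-4', revision='onnx', provider='CPUExecutionProvider'
)
image = pipe(
    prompt='A fantasy landscape, trending on artstation',
    image=init_image,
    strength=0.75,
    guidance_scale=7.5,
    num_inference_steps=10,
    generator=np.random.RandomState(0),
    output_type='np',
).images[0]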
22
0
"""simple docstring""" import argparse import torch from datasets import load_dataset from donut import DonutModel from transformers import ( DonutImageProcessor, DonutProcessor, DonutSwinConfig, DonutSwinModel, MBartConfig, MBartForCausalLM, VisionEncoderDecoderModel, XLMRobertaTokenizerFast, ) def _snake_case ( snake_case__ : Optional[int] ): A = model.config A = DonutSwinConfig( image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , ) A = MBartConfig( is_decoder=snake_case__ , is_encoder_decoder=snake_case__ , add_cross_attention=snake_case__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len( model.decoder.tokenizer ) , scale_embedding=snake_case__ , add_final_layer_norm=snake_case__ , ) return encoder_config, decoder_config def _snake_case ( snake_case__ : str ): if "encoder.model" in name: A = name.replace('encoder.model' , 'encoder' ) if "decoder.model" in name: A = name.replace('decoder.model' , 'decoder' ) if "patch_embed.proj" in name: A = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: A = name.replace('patch_embed.norm' , 'embeddings.norm' ) if name.startswith('encoder' ): if "layers" in name: A = 'encoder.' + name if "attn.proj" in name: A = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name and "mask" not in name: A = name.replace('attn' , 'attention.self' ) if "norm1" in name: A = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: A = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: A = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: A = name.replace('mlp.fc2' , 'output.dense' ) if name == "encoder.norm.weight": A = 'encoder.layernorm.weight' if name == "encoder.norm.bias": A = 'encoder.layernorm.bias' return name def _snake_case ( snake_case__ : Optional[int] , snake_case__ : str ): for key in orig_state_dict.copy().keys(): A = orig_state_dict.pop(snake_case__ ) if "qkv" in key: A = key.split('.' 
) A = int(key_split[3] ) A = int(key_split[5] ) A = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: A = val[:dim, :] A = val[dim : dim * 2, :] A = val[-dim:, :] else: A = val[:dim] A = val[dim : dim * 2] A = val[-dim:] elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]: # HuggingFace implementation doesn't use attn_mask buffer # and model doesn't use final LayerNorms for the encoder pass else: A = val return orig_state_dict def _snake_case ( snake_case__ : Tuple , snake_case__ : Tuple=None , snake_case__ : List[Any]=False ): # load original model A = DonutModel.from_pretrained(snake_case__ ).eval() # load HuggingFace model A , A = get_configs(snake_case__ ) A = DonutSwinModel(snake_case__ ) A = MBartForCausalLM(snake_case__ ) A = VisionEncoderDecoderModel(encoder=snake_case__ , decoder=snake_case__ ) model.eval() A = original_model.state_dict() A = convert_state_dict(snake_case__ , snake_case__ ) model.load_state_dict(snake_case__ ) # verify results on scanned document A = load_dataset('hf-internal-testing/example-documents' ) A = dataset['test'][0]['image'].convert('RGB' ) A = XLMRobertaTokenizerFast.from_pretrained(snake_case__ , from_slow=snake_case__ ) A = DonutImageProcessor( do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] ) A = DonutProcessor(snake_case__ , snake_case__ ) A = processor(snake_case__ , return_tensors='pt' ).pixel_values if model_name == "naver-clova-ix/donut-base-finetuned-docvqa": A = '<s_docvqa><s_question>{user_input}</s_question><s_answer>' A = 'When is the coffee break?' A = task_prompt.replace('{user_input}' , snake_case__ ) elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip": A = '<s_rvlcdip>' elif model_name in [ "naver-clova-ix/donut-base-finetuned-cord-v1", "naver-clova-ix/donut-base-finetuned-cord-v1-2560", ]: A = '<s_cord>' elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2": A = 's_cord-v2>' elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket": A = '<s_zhtrainticket>' elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]: # use a random prompt A = 'hello world' else: raise ValueError('Model name not supported' ) A = original_model.decoder.tokenizer(snake_case__ , add_special_tokens=snake_case__ , return_tensors='pt' )[ 'input_ids' ] A = original_model.encoder.model.patch_embed(snake_case__ ) A , A = model.encoder.embeddings(snake_case__ ) assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) # verify encoder hidden states A = original_model.encoder(snake_case__ ) A = model.encoder(snake_case__ ).last_hidden_state assert torch.allclose(snake_case__ , snake_case__ , atol=1e-2 ) # verify decoder hidden states A = original_model(snake_case__ , snake_case__ , snake_case__ ).logits A = model(snake_case__ , decoder_input_ids=snake_case__ ).logits assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) print('Looks ok!' 
) if pytorch_dump_folder_path is not None: print(F'Saving model and processor to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case__ ) processor.save_pretrained(snake_case__ ) if push_to_hub: model.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' ) processor.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''naver-clova-ix/donut-base-finetuned-docvqa''', required=False, type=str, help='''Name of the original model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, required=False, type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model and processor to the 🤗 hub.''', ) _lowercase = parser.parse_args() convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
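The trickiest step in convert_state_dict above is slicing a fused qkv projection into separate query/key/value tensors. A self-contained sketch of exactly that slicing; dim and the tensor contents are made up for illustration (dim plays the role of all_head_size in the conversion):

import torch

dim = 4
qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)

query = qkv_weight[:dim, :]           # rows 0..dim-1       -> query
key = qkv_weight[dim : dim * 2, :]    # rows dim..2*dim-1   -> key
value = qkv_weight[-dim:, :]          # rows 2*dim..3*dim-1 -> value

# The three slices tile the fused matrix exactly:
assert torch.equal(torch.cat([query, key, value]), qkv_weight)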
704
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch _lowercase = logging.get_logger(__name__) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Dict = ['''pixel_values'''] def __init__( self : Optional[Any] ,A_ : bool = True ,A_ : Optional[Dict[str, int]] = None ,A_ : PILImageResampling = PILImageResampling.BILINEAR ,A_ : bool = True ,A_ : Dict[str, int] = None ,A_ : bool = True ,A_ : Union[int, float] = 1 / 255 ,A_ : bool = True ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,**A_ : Optional[Any] ,) -> None: super().__init__(**A_ ) A = size if size is not None else {'shortest_edge': 256} A = get_size_dict(A_ ,default_to_square=A_ ) A = crop_size if crop_size is not None else {'height': 224, 'width': 224} A = get_size_dict(A_ ,param_name='crop_size' ) A = do_resize A = size A = resample A = do_center_crop A = crop_size A = do_rescale A = rescale_factor A = do_normalize A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A = image_std if image_std is not None else IMAGENET_STANDARD_STD def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : PILImageResampling = PILImageResampling.BICUBIC ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray: A = get_size_dict(A_ ,default_to_square=A_ ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) A = get_resize_output_image_size(A_ ,size=size['shortest_edge'] ,default_to_square=A_ ) return resize(A_ ,size=A_ ,resample=A_ ,data_format=A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray: A = get_size_dict(A_ ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}' ) return center_crop(A_ ,size=(size['height'], size['width']) ,data_format=A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : float ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : List[str] ) -> np.ndarray: return rescale(A_ ,scale=A_ ,data_format=A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : np.ndarray ,A_ : Union[float, List[float]] ,A_ : Union[float, List[float]] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : Any ,) -> np.ndarray: return normalize(A_ ,mean=A_ ,std=A_ ,data_format=A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : ImageInput ,A_ : Optional[bool] = None ,A_ : Dict[str, int] = None ,A_ : PILImageResampling = None ,A_ : bool = None ,A_ : Dict[str, int] = None ,A_ : Optional[bool] = None ,A_ : Optional[float] = None ,A_ : Optional[bool] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[str, TensorType]] = None ,A_ : Union[str, ChannelDimension] = ChannelDimension.FIRST ,**A_ : Tuple ,) -> List[Any]: A = do_resize if do_resize is not None else self.do_resize A = size if size is not None else self.size A = get_size_dict(A_ ,default_to_square=A_ ) A = resample if resample is not None else self.resample A = do_center_crop if do_center_crop is not None else self.do_center_crop A = crop_size if crop_size is not None else self.crop_size A = get_size_dict(A_ ,param_name='crop_size' ) A = do_rescale if do_rescale is not None else self.do_rescale A = rescale_factor if rescale_factor is not None else self.rescale_factor A = do_normalize if do_normalize is not None else self.do_normalize A = image_mean if image_mean is not None else self.image_mean A = image_std if image_std is not None else self.image_std A = make_list_of_images(A_ ) if not valid_images(A_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
A = [to_numpy_array(A_ ) for image in images] if do_resize: A = [self.resize(image=A_ ,size=A_ ,resample=A_ ) for image in images] if do_center_crop: A = [self.center_crop(image=A_ ,size=A_ ) for image in images] if do_rescale: A = [self.rescale(image=A_ ,scale=A_ ) for image in images] if do_normalize: A = [self.normalize(image=A_ ,mean=A_ ,std=A_ ) for image in images] A = [to_channel_dimension_format(A_ ,A_ ) for image in images] A = {'pixel_values': images} return BatchFeature(data=A_ ,tensor_type=A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : List[Tuple] = None ) -> str: A = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(A_ ) != len(A_ ): raise ValueError( 'Make sure that you pass in as many target sizes as the batch dimension of the logits' ) if is_torch_tensor(A_ ): A = target_sizes.numpy() A = [] for idx in range(len(A_ ) ): A = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='bilinear' ,align_corners=A_ ) A = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(A_ ) else: A = logits.argmax(dim=1 ) A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
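The preprocess method above applies its transforms in a fixed order (resize, center crop, rescale, normalize, then channel reordering). A numpy-only sketch of the same order on one fake image; the resize step is omitted for brevity, and the mean/std values are stand-ins for IMAGENET_STANDARD_MEAN/STD, while 1/255 and the 224 crop do appear as defaults above:

import numpy as np

image = np.random.randint(0, 256, (256, 256, 3)).astype(np.float32)  # HWC image

# center crop to 224x224 (the default crop_size above)
top = (image.shape[0] - 224) // 2
left = (image.shape[1] - 224) // 2
image = image[top : top + 224, left : left + 224]

image = image * (1 / 255)  # rescale (the default rescale_factor above)

mean = np.array([0.5, 0.5, 0.5])  # stand-ins for the ImageNet standard stats
std = np.array([0.5, 0.5, 0.5])
image = (image - mean) / std  # normalize

image = image.transpose(2, 0, 1)  # channels-first, as ChannelDimension.FIRST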
22
0
"""simple docstring""" def _snake_case ( snake_case__ : str , snake_case__ : list[str] ): A = '' for word_or_phrase in separated: if not isinstance(snake_case__ , snake_case__ ): raise Exception('join() accepts only strings to be joined' ) joined += word_or_phrase + separator return joined.strip(snake_case__ ) if __name__ == "__main__": from doctest import testmod testmod()
705
"""simple docstring""" import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 _lowercase = data_utils.TransfoXLTokenizer _lowercase = data_utils.TransfoXLCorpus _lowercase = data_utils _lowercase = data_utils def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : int ): if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(snake_case__ , 'rb' ) as fp: A = pickle.load(snake_case__ , encoding='latin1' ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) A = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file'] print(F'Save vocabulary to {pytorch_vocab_dump_path}' ) A = corpus.vocab.__dict__ torch.save(snake_case__ , snake_case__ ) A = corpus.__dict__ corpus_dict_no_vocab.pop('vocab' , snake_case__ ) A = pytorch_dump_folder_path + '/' + CORPUS_NAME print(F'Save dataset to {pytorch_dataset_dump_path}' ) torch.save(snake_case__ , snake_case__ ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model A = os.path.abspath(snake_case__ ) A = os.path.abspath(snake_case__ ) print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' ) # Initialise PyTorch model if transfo_xl_config_file == "": A = TransfoXLConfig() else: A = TransfoXLConfig.from_json_file(snake_case__ ) print(F'Building PyTorch model from configuration: {config}' ) A = TransfoXLLMHeadModel(snake_case__ ) A = load_tf_weights_in_transfo_xl(snake_case__ , snake_case__ , snake_case__ ) # Save pytorch-model A = os.path.join(snake_case__ , snake_case__ ) A = os.path.join(snake_case__ , snake_case__ ) print(F'Save PyTorch model to {os.path.abspath(snake_case__ )}' ) torch.save(model.state_dict() , snake_case__ ) print(F'Save configuration file to {os.path.abspath(snake_case__ )}' ) with open(snake_case__ , 'w' , encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the folder to store the PyTorch model or dataset/vocab.''', ) parser.add_argument( '''--tf_checkpoint_path''', default='''''', type=str, help='''An optional path to a TensorFlow checkpoint path to be converted.''', ) parser.add_argument( '''--transfo_xl_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained BERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--transfo_xl_dataset_file''', default='''''', type=str, help='''An optional dataset file to be converted in a vocabulary.''', ) _lowercase = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
22
0
"""simple docstring""" import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def _snake_case ( snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : int=None ): # set parameter of one layer assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match' A = nn.Parameter(snake_case__ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match' A = nn.Parameter(snake_case__ ) def _snake_case ( snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : List[str] ): # set torch weights for 1-to-1 comparison A = np.asarray(weights[0] ) A = np.asarray(weights[1] ) A = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(snake_case__ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(snake_case__ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case__ ) , ) set_param( torch_layer.output.dense , torch.tensor(snake_case__ ).view(-1 , snake_case__ ).contiguous().transpose(0 , 1 ) , ) def _snake_case ( snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : Dict ): # set torch weights for 1-to-1 comparison A = np.asarray(weights[0] ) A = np.asarray(weights[1] ) A = np.asarray(weights[2] ) A = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(snake_case__ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case__ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(snake_case__ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(snake_case__ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case__ ) , ) set_param( torch_layer.output.dense , torch.tensor(snake_case__ ).view(-1 , snake_case__ ).contiguous().transpose(0 , 1 ) , ) def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] ): # layernorm 1 A = weights[0][0][0] A = np.asarray(layer_norm_a[0] ) A = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(snake_case__ ) , torch.tensor(snake_case__ ) , ) # lsh weights + output A = weights[0][1] if len(snake_case__ ) < 4: set_layer_weights_in_torch_lsh(snake_case__ , torch_block.attention , snake_case__ ) else: set_layer_weights_in_torch_local(snake_case__ , torch_block.attention , snake_case__ ) # intermediate weighs A = weights[2][0][1][2] # Chunked Feed Forward if len(snake_case__ ) == 4: A = intermediate_weights[2] # layernorm 2 A = np.asarray(intermediate_weights[0][0] ) A = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(snake_case__ ) , torch.tensor(snake_case__ ) , ) # intermediate dense A = np.asarray(intermediate_weights[1][0] ) A = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(snake_case__ ).transpose(0 , 1 ).contiguous() , torch.tensor(snake_case__ ) , ) # intermediate out A = np.asarray(intermediate_weights[4][0] ) A = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(snake_case__ ).transpose(0 , 1 ).contiguous() , torch.tensor(snake_case__ ) , ) def _snake_case ( snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : List[Any] ): # reformer 
model A = torch_model.reformer # word embeds A = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(snake_case__ ) , ) if isinstance(weights[3] , snake_case__ ): A = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): A = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F'{position_embeddings[emb_idx]} emb does not match' A = nn.Parameter(torch.tensor(snake_case__ ) ) A = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( snake_case__ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): A = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(snake_case__ , snake_case__ , snake_case__ ) # output layer norm A = np.asarray(weights[7][0] ) A = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(snake_case__ ) , torch.tensor(snake_case__ ) , ) # output embeddings A = np.asarray(weights[9][0] ) A = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(snake_case__ ).transpose(0 , 1 ).contiguous() , torch.tensor(snake_case__ ) , ) def _snake_case ( snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Any ): # Initialise PyTorch model A = ReformerConfig.from_json_file(snake_case__ ) print(F'Building PyTorch model from configuration: {config}' ) A = ReformerModelWithLMHead(snake_case__ ) with open(snake_case__ , 'rb' ) as f: A = pickle.load(snake_case__ )['weights'] set_model_weights_in_torch(snake_case__ , snake_case__ , config.hidden_size ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , snake_case__ ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained Reformer model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) _lowercase = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
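The set_param helper above guards every weight copy with a shape assertion before wrapping the array in nn.Parameter. A minimal self-contained sketch of that pattern, with a random array standing in for a real Trax weight:

import numpy as np
import torch
from torch import nn

layer = nn.Linear(4, 4, bias=False)                      # target HF module
trax_weight = np.random.randn(4, 4).astype(np.float32)   # pretend Trax array

weight = torch.tensor(trax_weight)
assert layer.weight.shape == weight.shape, 'layer.weight does not match'
layer.weight = nn.Parameter(weight)                      # same move as set_param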
706
"""simple docstring""" from collections import deque from math import floor from random import random from time import time class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Dict ) -> int: A = {} def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : Optional[Any]=1 ) -> int: if self.graph.get(A_ ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: A = [[w, v]] if not self.graph.get(A_ ): A = [] def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: return list(self.graph ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Dict ) -> Optional[Any]: if self.graph.get(A_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int=-2 ,A_ : Dict=-1 ) -> str: if s == d: return [] A = [] A = [] if s == -2: A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(A_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = ss # check if se have reached the starting point if len(A_ ) == 0: return visited def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Any=-1 ) -> int: if c == -1: A = floor(random() * 1_0000 ) + 10 for i in range(A_ ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): A = floor(random() * c ) + 1 if n != i: self.add_pair(A_ ,A_ ,1 ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Union[str, Any]=-2 ) -> Optional[Any]: A = deque() A = [] if s == -2: A = list(self.graph )[0] d.append(A_ ) visited.append(A_ ) while d: A = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple ) -> Any: A = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Union[str, Any] ) -> str: return len(self.graph[u] ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any]=-2 ) -> Any: A = [] A = [] if s == -2: A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = s A = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = ss # check if se have reached the starting point if len(A_ ) == 0: return sorted_nodes def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = [] A = [] A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = -2 A = [] A = s A = False A = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A = len(A_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 
1: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() A = True if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = False indirect_parents.append(A_ ) A = s A = ss # check if se have reached the starting point if len(A_ ) == 0: return list(A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: A = [] A = [] A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = -2 A = [] A = s A = False A = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A = len(A_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() A = True if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = False indirect_parents.append(A_ ) A = s A = ss # check if se have reached the starting point if len(A_ ) == 0: return False def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple=-2 ,A_ : List[str]=-1 ) -> str: A = time() self.dfs(A_ ,A_ ) A = time() return end - begin def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any]=-2 ) -> Dict: A = time() self.bfs(A_ ) A = time() return end - begin class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] ) -> Tuple: A = {} def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[str]=1 ) -> Dict: # check if the u exists if self.graph.get(A_ ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist A = [[w, v]] # add the other way if self.graph.get(A_ ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist A = [[w, u]] def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : List[str] ) -> List[Any]: if self.graph.get(A_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(A_ ) # the other way round if self.graph.get(A_ ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=-2 ,A_ : List[Any]=-1 ) -> int: if s == d: return [] A = [] A = [] if s == -2: A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(A_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = ss # check if se have reached the starting point if len(A_ ) == 0: return visited def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int]=-1 ) -> List[Any]: if c == -1: A = floor(random() * 1_0000 ) + 10 for i in range(A_ ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): A = floor(random() * c ) + 1 if n != i: self.add_pair(A_ ,A_ ,1 ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict=-2 ) -> List[Any]: A = deque() A = [] if s == -2: A = list(self.graph )[0] d.append(A_ ) visited.append(A_ ) while d: A = d.popleft() if 
len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[Any] ) -> List[Any]: return len(self.graph[u] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: A = [] A = [] A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = -2 A = [] A = s A = False A = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A = len(A_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() A = True if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = False indirect_parents.append(A_ ) A = s A = ss # check if se have reached the starting point if len(A_ ) == 0: return list(A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: A = [] A = [] A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = -2 A = [] A = s A = False A = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A = len(A_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() A = True if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = False indirect_parents.append(A_ ) A = s A = ss # check if se have reached the starting point if len(A_ ) == 0: return False def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: return list(self.graph ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any]=-2 ,A_ : List[str]=-1 ) -> Any: A = time() self.dfs(A_ ,A_ ) A = time() return end - begin def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any]=-2 ) -> Union[str, Any]: A = time() self.bfs(A_ ) A = time() return end - begin
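A usage sketch for the graph classes above (both are printed under the same placeholder name lowerCAmelCase_; assume the first, directed one is importable as Graph). The method names add_pair, dfs, and bfs are the ones the class calls on itself internally:

g = Graph()            # hypothetical name for the first (directed) class above
g.add_pair(0, 1)
g.add_pair(1, 2)
g.add_pair(2, 3)

print(g.dfs(0, 3))     # depth-first visit order from 0 until 3 is reached
print(g.bfs(0))        # breadth-first visit order starting at 0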
22
0
"""simple docstring""" import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Optional[int]=None ,A_ : List[Any]=None ,A_ : int=None ,A_ : Optional[int]="resnet50" ,A_ : Optional[Any]=3 ,A_ : Any=32 ,A_ : Any=3 ,A_ : Tuple=True ,A_ : Union[str, Any]=True ,) -> Dict: A = parent A = out_indices if out_indices is not None else [4] A = stage_names A = out_features A = backbone A = batch_size A = image_size A = num_channels A = use_pretrained_backbone A = is_training def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A = self.get_config() return config, pixel_values def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: return TimmBackboneConfig( image_size=self.image_size ,num_channels=self.num_channels ,out_features=self.out_features ,out_indices=self.out_indices ,stage_names=self.stage_names ,use_pretrained_backbone=self.use_pretrained_backbone ,backbone=self.backbone ,) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : List[str] ,A_ : int ) -> List[str]: A = TimmBackbone(config=A_ ) model.to(A_ ) model.eval() with torch.no_grad(): A = model(A_ ) self.parent.assertEqual( result.feature_map[-1].shape ,(self.batch_size, model.channels[-1], 14, 14) ,) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: A = self.prepare_config_and_inputs() A , A = config_and_inputs A = {'pixel_values': pixel_values} return config, inputs_dict @require_torch @require_timm class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = (TimmBackbone,) if is_torch_available() else () _lowerCamelCase: Union[str, Any] = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {} _lowerCamelCase: Union[str, Any] = False _lowerCamelCase: str = False _lowerCamelCase: Tuple = False _lowerCamelCase: List[str] = False def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: A = TimmBackboneModelTester(self ) A = ConfigTester(self ,config_class=A_ ,has_text_modality=A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A = 'resnet18' A = 'microsoft/resnet-18' A = AutoBackbone.from_pretrained(A_ ,use_timm_backbone=A_ ) A = AutoBackbone.from_pretrained(A_ ) self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) ) self.assertEqual(len(timm_model.stage_names ) ,len(transformers_model.stage_names ) ) 
self.assertEqual(timm_model.channels ,transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices ,(-1,) ) self.assertEqual(transformers_model.out_indices ,[len(timm_model.stage_names ) - 1] ) A = AutoBackbone.from_pretrained(A_ ,use_timm_backbone=A_ ,out_indices=[1, 2, 3] ) A = AutoBackbone.from_pretrained(A_ ,out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices ,transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels ,transformers_model.channels ) @unittest.skip('TimmBackbone doesn\'t support feed forward chunking' ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: pass @unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: pass @unittest.skip('TimmBackbone initialization is managed on the timm side' ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: pass @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: pass @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Any: pass @unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: pass @unittest.skip('model weights aren\'t tied in TimmBackbone.' ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: pass @unittest.skip('model weights aren\'t tied in TimmBackbone.' ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: pass @unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]: pass @unittest.skip('TimmBackbone doesn\'t support output_attentions.' ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: pass @unittest.skip('Safetensors is not supported by timm.' ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str: pass def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ) A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A = [*signature.parameters.keys()] A = ['pixel_values'] self.assertListEqual(arg_names[:1] ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = True A = self.has_attentions # no need to test all models as different heads yield the same functionality A = self.all_model_classes[0] A = model_class(A_ ) model.to(A_ ) A = self._prepare_for_class(A_ ,A_ ) A = model(**A_ ) A = outputs[0][-1] # Encoder-/Decoder-only models A = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: A = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=A_ ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ) model.to(A_ ) model.eval() A = model(**A_ ) self.assertEqual(len(result.feature_maps ) ,len(config.out_indices ) ) self.assertEqual(len(model.channels ) ,len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None A = copy.deepcopy(A_ ) A = None A = model_class(A_ ) model.to(A_ ) model.eval() A = model(**A_ ) self.assertEqual(len(result.feature_maps ) ,1 ) self.assertEqual(len(model.channels ) ,1 ) # Check backbone can be initialized with fresh weights A = copy.deepcopy(A_ ) A = False A = model_class(A_ ) model.to(A_ ) model.eval() A = model(**A_ )
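The timm/transformers equivalence checked by the test above can be reproduced standalone. This sketch reuses the checkpoints and out_indices from the test and downloads weights on first run; treat the assertions as the test's claims, not guarantees:

from transformers import AutoBackbone

timm_backbone = AutoBackbone.from_pretrained(
    'resnet18', use_timm_backbone=True, out_indices=[1, 2, 3]
)
hf_backbone = AutoBackbone.from_pretrained(
    'microsoft/resnet-18', out_indices=[1, 2, 3]
)

assert len(timm_backbone.out_features) == len(hf_backbone.out_features)
assert timm_backbone.channels == hf_backbone.channels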
707
"""simple docstring""" from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def _snake_case ( snake_case__ : str = "isbn/0140328726" ): A = olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes if new_olid.count('/' ) != 1: A = F'{olid} is not a valid Open Library olid' raise ValueError(snake_case__ ) return requests.get(F'https://openlibrary.org/{new_olid}.json' ).json() def _snake_case ( snake_case__ : dict ): A = { 'title': 'Title', 'publish_date': 'Publish date', 'authors': 'Authors', 'number_of_pages': 'Number of pages:', 'first_sentence': 'First sentence', 'isbn_10': 'ISBN (10)', 'isbn_13': 'ISBN (13)', } A = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} A = [ get_openlibrary_data(author['key'] )['name'] for author in data['Authors'] ] A = data['First sentence']['value'] for key, value in data.items(): if isinstance(snake_case__ , snake_case__ ): A = ', '.join(snake_case__ ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: _lowercase = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""") continue print(F"""\nSearching Open Library for ISBN: {isbn}...\n""") try: _lowercase = summarize_book(get_openlibrary_data(F"""isbn/{isbn}""")) print('''\n'''.join(F"""{key}: {value}""" for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(F"""Sorry, there are no results for ISBN: {isbn}.""")
22
0
"""simple docstring""" def _snake_case ( snake_case__ : int ): if not isinstance(snake_case__ , snake_case__ ): A = F'Input value of [number={number}] must be an integer' raise TypeError(snake_case__ ) if number < 0: return False A = number * number while number > 0: if number % 10 != number_square % 10: return False number //= 10 number_square //= 10 return True if __name__ == "__main__": import doctest doctest.testmod()
708
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) _lowercase = { '''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''], '''tokenization_perceiver''': ['''PerceiverTokenizer'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''PerceiverFeatureExtractor'''] _lowercase = ['''PerceiverImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PerceiverForImageClassificationConvProcessing''', '''PerceiverForImageClassificationFourier''', '''PerceiverForImageClassificationLearned''', '''PerceiverForMaskedLM''', '''PerceiverForMultimodalAutoencoding''', '''PerceiverForOpticalFlow''', '''PerceiverForSequenceClassification''', '''PerceiverLayer''', '''PerceiverModel''', '''PerceiverPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
0
import numpy as np


def _snake_case(f, ya, xa, h, x_end):
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        ka = f(x, y[k])
        kb = f(x + 0.5 * h, y[k] + 0.5 * h * ka)
        kc = f(x + 0.5 * h, y[k] + 0.5 * h * kb)
        kd = f(x + h, y[k] + h * kc)
        y[k + 1] = y[k] + (1 / 6) * h * (ka + 2 * kb + 2 * kc + kd)
        x += h
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
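A usage sketch for the classical Runge-Kutta integrator above, solving y' = y with y(0) = 1 on [0, 1] and checking the endpoint against e (the argument order f, ya, xa, h, x_end is the one reconstructed above):

import numpy as np

y = _snake_case(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
print(y[-1], np.e)  # both print approximately 2.71828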
709
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def _snake_case ( snake_case__ : int ): A = SwinvaConfig() A = swinva_name.split('_' ) A = name_split[1] if "to" in name_split[3]: A = int(name_split[3][-3:] ) else: A = int(name_split[3] ) if "to" in name_split[2]: A = int(name_split[2][-2:] ) else: A = int(name_split[2][6:] ) if model_size == "tiny": A = 96 A = (2, 2, 6, 2) A = (3, 6, 12, 24) elif model_size == "small": A = 96 A = (2, 2, 18, 2) A = (3, 6, 12, 24) elif model_size == "base": A = 128 A = (2, 2, 18, 2) A = (4, 8, 16, 32) else: A = 192 A = (2, 2, 18, 2) A = (6, 12, 24, 48) if "to" in swinva_name: A = (12, 12, 12, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): A = 2_1841 A = 'huggingface/label-files' A = 'imagenet-22k-id2label.json' A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) ) A = {int(snake_case__ ): v for k, v in idalabel.items()} A = idalabel A = {v: k for k, v in idalabel.items()} else: A = 1000 A = 'huggingface/label-files' A = 'imagenet-1k-id2label.json' A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) ) A = {int(snake_case__ ): v for k, v in idalabel.items()} A = idalabel A = {v: k for k, v in idalabel.items()} A = img_size A = num_classes A = embed_dim A = depths A = num_heads A = window_size return config def _snake_case ( snake_case__ : List[Any] ): if "patch_embed.proj" in name: A = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: A = name.replace('patch_embed.norm' , 'embeddings.norm' ) if "layers" in name: A = 'encoder.' + name if "attn.proj" in name: A = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: A = name.replace('attn' , 'attention.self' ) if "norm1" in name: A = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: A = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: A = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: A = name.replace('mlp.fc2' , 'output.dense' ) if "q_bias" in name: A = name.replace('q_bias' , 'query.bias' ) if "k_bias" in name: A = name.replace('k_bias' , 'key.bias' ) if "v_bias" in name: A = name.replace('v_bias' , 'value.bias' ) if "cpb_mlp" in name: A = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' ) if name == "norm.weight": A = 'layernorm.weight' if name == "norm.bias": A = 'layernorm.bias' if "head" in name: A = name.replace('head' , 'classifier' ) else: A = 'swinv2.' + name return name def _snake_case ( snake_case__ : List[Any] , snake_case__ : List[Any] ): for key in orig_state_dict.copy().keys(): A = orig_state_dict.pop(snake_case__ ) if "mask" in key: continue elif "qkv" in key: A = key.split('.' 
) A = int(key_split[1] ) A = int(key_split[3] ) A = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: A = val[:dim, :] A = val[dim : dim * 2, :] A = val[-dim:, :] else: A = val[:dim] A = val[ dim : dim * 2 ] A = val[-dim:] else: A = val return orig_state_dict def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Tuple ): A = timm.create_model(snake_case__ , pretrained=snake_case__ ) timm_model.eval() A = get_swinva_config(snake_case__ ) A = SwinvaForImageClassification(snake_case__ ) model.eval() A = convert_state_dict(timm_model.state_dict() , snake_case__ ) model.load_state_dict(snake_case__ ) A = 'http://images.cocodataset.org/val2017/000000039769.jpg' A = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swinva_name.replace('_' , '-' ) ) ) A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) A = image_processor(images=snake_case__ , return_tensors='pt' ) A = timm_model(inputs['pixel_values'] ) A = model(**snake_case__ ).logits assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) print(F'Saving model {swinva_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case__ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(snake_case__ ) model.push_to_hub( repo_path_or_name=Path(snake_case__ , snake_case__ ) , organization='nandwalritik' , commit_message='Add model' , ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swinv2_name''', default='''swinv2_tiny_patch4_window8_256''', type=str, help='''Name of the Swinv2 timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) _lowercase = parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
22
0
"""simple docstring""" import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __get__( self : Optional[int] ,A_ : int ,A_ : Tuple=None ) -> Dict: # See docs.python.org/3/howto/descriptor.html#properties if obj is None: return self if self.fget is None: raise AttributeError('unreadable attribute' ) A = '__cached_' + self.fget.__name__ A = getattr(A_ ,A_ ,A_ ) if cached is None: A = self.fget(A_ ) setattr(A_ ,A_ ,A_ ) return cached def _snake_case ( snake_case__ : Union[str, Any] ): A = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(F'invalid truth value {val!r}' ) def _snake_case ( snake_case__ : str ): if is_torch_fx_proxy(snake_case__ ): return True if is_torch_available(): import torch if isinstance(snake_case__ , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(snake_case__ , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(snake_case__ , (jnp.ndarray, Tracer) ): return True return isinstance(snake_case__ , np.ndarray ) def _snake_case ( snake_case__ : Optional[Any] ): return isinstance(snake_case__ , np.ndarray ) def _snake_case ( snake_case__ : List[Any] ): return _is_numpy(snake_case__ ) def _snake_case ( snake_case__ : List[Any] ): import torch return isinstance(snake_case__ , torch.Tensor ) def _snake_case ( snake_case__ : List[str] ): return False if not is_torch_available() else _is_torch(snake_case__ ) def _snake_case ( snake_case__ : List[Any] ): import torch return isinstance(snake_case__ , torch.device ) def _snake_case ( snake_case__ : Optional[int] ): return False if not is_torch_available() else _is_torch_device(snake_case__ ) def _snake_case ( snake_case__ : Optional[int] ): import torch if isinstance(snake_case__ , snake_case__ ): if hasattr(snake_case__ , snake_case__ ): A = getattr(snake_case__ , snake_case__ ) else: return False return isinstance(snake_case__ , torch.dtype ) def _snake_case ( snake_case__ : List[Any] ): return False if not is_torch_available() else _is_torch_dtype(snake_case__ ) def _snake_case ( snake_case__ : Dict ): import tensorflow as tf return isinstance(snake_case__ , tf.Tensor ) def _snake_case ( snake_case__ : str ): return False if not is_tf_available() else _is_tensorflow(snake_case__ ) def _snake_case ( snake_case__ : List[Any] ): import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(snake_case__ , 'is_symbolic_tensor' ): return tf.is_symbolic_tensor(snake_case__ ) return type(snake_case__ ) == tf.Tensor def _snake_case ( snake_case__ : str ): return False if not is_tf_available() else _is_tf_symbolic_tensor(snake_case__ ) def _snake_case ( snake_case__ : List[str] ): import jax.numpy as jnp # noqa: F811 return isinstance(snake_case__ , jnp.ndarray ) def _snake_case ( snake_case__ : int ): return False if not is_flax_available() else _is_jax(snake_case__ ) def _snake_case ( snake_case__ : List[Any] ): if isinstance(snake_case__ , (dict, 
UserDict) ): return {k: to_py_obj(snake_case__ ) for k, v in obj.items()} elif isinstance(snake_case__ , (list, tuple) ): return [to_py_obj(snake_case__ ) for o in obj] elif is_tf_tensor(snake_case__ ): return obj.numpy().tolist() elif is_torch_tensor(snake_case__ ): return obj.detach().cpu().tolist() elif is_jax_tensor(snake_case__ ): return np.asarray(snake_case__ ).tolist() elif isinstance(snake_case__ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def _snake_case ( snake_case__ : int ): if isinstance(snake_case__ , (dict, UserDict) ): return {k: to_numpy(snake_case__ ) for k, v in obj.items()} elif isinstance(snake_case__ , (list, tuple) ): return np.array(snake_case__ ) elif is_tf_tensor(snake_case__ ): return obj.numpy() elif is_torch_tensor(snake_case__ ): return obj.detach().cpu().numpy() elif is_jax_tensor(snake_case__ ): return np.asarray(snake_case__ ) else: return obj class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : str ) -> str: A = fields(self ) # Safety and consistency checks if not len(A_ ): raise ValueError(F'{self.__class__.__name__} has no fields.' ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(F'{self.__class__.__name__} should not have more than one required field.' ) A = getattr(self ,class_fields[0].name ) A = all(getattr(self ,field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(A_ ): if isinstance(A_ ,A_ ): A = first_field.items() A = True else: try: A = iter(A_ ) A = True except TypeError: A = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(A_ ): if ( not isinstance(A_ ,(list, tuple) ) or not len(A_ ) == 2 or not isinstance(element[0] ,A_ ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute A = first_field else: # If we have a mixed iterator, raise an error raise ValueError( F'Cannot set key/value for {element}. It needs to be a tuple (key, value).' ) break setattr(self ,element[0] ,element[1] ) if element[1] is not None: A = element[1] elif first_field is not None: A = first_field else: for field in class_fields: A = getattr(self ,field.name ) if v is not None: A = v def __delitem__( self : Optional[int] ,*A_ : Optional[int] ,**A_ : Dict ) -> Tuple: raise Exception(F'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.' ) def _SCREAMING_SNAKE_CASE ( self : int ,*A_ : Optional[int] ,**A_ : List[Any] ) -> Dict: raise Exception(F'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.' ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,*A_ : Optional[Any] ,**A_ : Dict ) -> Dict: raise Exception(F'You cannot use ``pop`` on a {self.__class__.__name__} instance.' ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,*A_ : Union[str, Any] ,**A_ : Dict ) -> Dict: raise Exception(F'You cannot use ``update`` on a {self.__class__.__name__} instance.' 
) def __getitem__( self : int ,A_ : Any ) -> str: if isinstance(A_ ,A_ ): A = dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self : Optional[Any] ,A_ : int ,A_ : Dict ) -> Dict: if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(A_ ,A_ ) super().__setattr__(A_ ,A_ ) def __setitem__( self : int ,A_ : Dict ,A_ : Tuple ) -> int: # Will raise a KeyException if needed super().__setitem__(A_ ,A_ ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple[Any]: return tuple(self[k] for k in self.keys() ) class lowerCAmelCase_ ( _lowercase , _lowercase ): '''simple docstring''' @classmethod def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,A_ : int ) -> Tuple: raise ValueError( F'{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}' ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: str = '''longest''' _lowerCamelCase: Optional[int] = '''max_length''' _lowerCamelCase: Any = '''do_not_pad''' class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[Any] = '''pt''' _lowerCamelCase: Union[str, Any] = '''tf''' _lowerCamelCase: Tuple = '''np''' _lowerCamelCase: Dict = '''jax''' class lowerCAmelCase_ : '''simple docstring''' def __init__( self : int ,A_ : List[ContextManager] ) -> Any: A = context_managers A = ExitStack() def __enter__( self : Tuple ) -> List[Any]: for context_manager in self.context_managers: self.stack.enter_context(A_ ) def __exit__( self : int ,*A_ : str ,**A_ : str ) -> Any: self.stack.__exit__(*A_ ,**A_ ) def _snake_case ( snake_case__ : Union[str, Any] ): A = infer_framework(snake_case__ ) if framework == "tf": A = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": A = inspect.signature(model_class.forward ) # PyTorch models else: A = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def _snake_case ( snake_case__ : List[str] ): A = model_class.__name__ A = infer_framework(snake_case__ ) if framework == "tf": A = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": A = inspect.signature(model_class.forward ) # PyTorch models else: A = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def _snake_case ( snake_case__ : MutableMapping , snake_case__ : str = "" , snake_case__ : str = "." ): def _flatten_dict(snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any]="" , snake_case__ : str="." 
): for k, v in d.items(): A = str(snake_case__ ) + delimiter + str(snake_case__ ) if parent_key else k if v and isinstance(snake_case__ , snake_case__ ): yield from flatten_dict(snake_case__ , snake_case__ , delimiter=snake_case__ ).items() else: yield key, v return dict(_flatten_dict(snake_case__ , snake_case__ , snake_case__ ) ) @contextmanager def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : bool = False ): if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def _snake_case ( snake_case__ : int , snake_case__ : Union[str, Any]=None ): if is_numpy_array(snake_case__ ): return np.transpose(snake_case__ , axes=snake_case__ ) elif is_torch_tensor(snake_case__ ): return array.T if axes is None else array.permute(*snake_case__ ) elif is_tf_tensor(snake_case__ ): import tensorflow as tf return tf.transpose(snake_case__ , perm=snake_case__ ) elif is_jax_tensor(snake_case__ ): return jnp.transpose(snake_case__ , axes=snake_case__ ) else: raise ValueError(F'Type not supported for transpose: {type(snake_case__ )}.' ) def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Tuple ): if is_numpy_array(snake_case__ ): return np.reshape(snake_case__ , snake_case__ ) elif is_torch_tensor(snake_case__ ): return array.reshape(*snake_case__ ) elif is_tf_tensor(snake_case__ ): import tensorflow as tf return tf.reshape(snake_case__ , snake_case__ ) elif is_jax_tensor(snake_case__ ): return jnp.reshape(snake_case__ , snake_case__ ) else: raise ValueError(F'Type not supported for reshape: {type(snake_case__ )}.' ) def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Any=None ): if is_numpy_array(snake_case__ ): return np.squeeze(snake_case__ , axis=snake_case__ ) elif is_torch_tensor(snake_case__ ): return array.squeeze() if axis is None else array.squeeze(dim=snake_case__ ) elif is_tf_tensor(snake_case__ ): import tensorflow as tf return tf.squeeze(snake_case__ , axis=snake_case__ ) elif is_jax_tensor(snake_case__ ): return jnp.squeeze(snake_case__ , axis=snake_case__ ) else: raise ValueError(F'Type not supported for squeeze: {type(snake_case__ )}.' ) def _snake_case ( snake_case__ : List[Any] , snake_case__ : int ): if is_numpy_array(snake_case__ ): return np.expand_dims(snake_case__ , snake_case__ ) elif is_torch_tensor(snake_case__ ): return array.unsqueeze(dim=snake_case__ ) elif is_tf_tensor(snake_case__ ): import tensorflow as tf return tf.expand_dims(snake_case__ , axis=snake_case__ ) elif is_jax_tensor(snake_case__ ): return jnp.expand_dims(snake_case__ , axis=snake_case__ ) else: raise ValueError(F'Type not supported for expand_dims: {type(snake_case__ )}.' ) def _snake_case ( snake_case__ : Optional[int] ): if is_numpy_array(snake_case__ ): return np.size(snake_case__ ) elif is_torch_tensor(snake_case__ ): return array.numel() elif is_tf_tensor(snake_case__ ): import tensorflow as tf return tf.size(snake_case__ ) elif is_jax_tensor(snake_case__ ): return array.size else: raise ValueError(F'Type not supported for expand_dims: {type(snake_case__ )}.' 
)


def add_model_info_to_auto_map(auto_map, repo_id):
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f'{repo_id}--{v}' if (v is not None and '--' not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f'{repo_id}--{value}'
    return auto_map


def infer_framework(model_class):
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith('tensorflow') or module.startswith('keras') or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith('torch') or name == "PreTrainedModel":
            return "pt"
        elif module.startswith('flax') or module.startswith('jax') or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f'Could not infer framework from class {model_class}.')
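The transpose/reshape/squeeze/size helpers above all follow the same duck-typed dispatch pattern: probe the concrete array type with a cheap predicate and import the heavy framework only inside the branch that needs it, so none of torch/tensorflow/jax becomes a hard dependency. A minimal sketch of that pattern (numpy required, torch optional; the helper name generic_size is illustrative, not from the file):

import numpy as np

def generic_size(array):
    # numpy is always available, so check it first without any lazy import
    if isinstance(array, np.ndarray):
        return array.size
    # torch tensors expose numel(); import lazily, mirroring the utils above
    try:
        import torch
        if isinstance(array, torch.Tensor):
            return array.numel()
    except ImportError:
        pass
    raise ValueError(f"Type not supported: {type(array)}.")

print(generic_size(np.zeros((2, 3))))  # -> 6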
710
"""simple docstring""" from math import pi, sqrt def _snake_case ( snake_case__ : float ): if num <= 0: raise ValueError('math domain error' ) if num > 171.5: raise OverflowError('math range error' ) elif num - int(snake_case__ ) not in (0, 0.5): raise NotImplementedError('num must be an integer or a half-integer' ) elif num == 0.5: return sqrt(snake_case__ ) else: return 1.0 if num == 1 else (num - 1) * gamma(num - 1 ) def _snake_case ( ): assert gamma(0.5 ) == sqrt(snake_case__ ) assert gamma(1 ) == 1.0 assert gamma(2 ) == 1.0 if __name__ == "__main__": from doctest import testmod testmod() _lowercase = 1.0 while num: _lowercase = float(input('''Gamma of: ''')) print(F"""gamma({num}) = {gamma(num)}""") print('''\nEnter 0 to exit...''')
22
0
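The gamma implementation in the row above relies on the recurrence Γ(n) = (n − 1)·Γ(n − 1) with base cases Γ(1) = 1 and Γ(1/2) = √π. A quick self-contained check of that recurrence against math.gamma from the standard library (the function body is restated in simplified form here so the snippet runs on its own; the domain guards from the original are omitted):

import math

def gamma(num: float) -> float:
    # recurrence with the two base cases; assumes num is a positive
    # integer or half-integer, as enforced by the full version above
    if num <= 0:
        raise ValueError("math domain error")
    if num == 0.5:
        return math.sqrt(math.pi)
    return 1.0 if num == 1 else (num - 1) * gamma(num - 1)

for n in (0.5, 1, 1.5, 2, 3.5, 7):
    assert math.isclose(gamma(n), math.gamma(n)), n
print("recurrence matches math.gamma on integers and half-integers")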
"""simple docstring""" import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: A = 1 A = 3 A = (32, 32) A = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(A_ ) return image @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: torch.manual_seed(0 ) A = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,) return model @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: torch.manual_seed(0 ) A = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,) return model @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: torch.manual_seed(0 ) A = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,) return CLIPTextModel(A_ ) @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: def extract(*A_ : List[str] ,**A_ : int ): class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] ) -> Optional[Any]: A = torch.ones([0] ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Union[str, Any] ) -> int: self.pixel_values.to(A_ ) return self return Out() return extract def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.dummy_cond_unet A = DDIMScheduler( beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,clip_sample=A_ ,set_alpha_to_one=A_ ,) A = self.dummy_vae A = self.dummy_text_encoder A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) # make sure here that pndm scheduler skips prk A = StableDiffusionPipeline( unet=A_ ,scheduler=A_ ,vae=A_ ,text_encoder=A_ ,tokenizer=A_ ,safety_checker=A_ ,feature_extractor=self.dummy_extractor ,) A = sd_pipe.to(A_ ) sd_pipe.set_progress_bar_config(disable=A_ ) A = 'A painting of a squirrel eating a burger' A = torch.Generator(device=A_ ).manual_seed(0 ) A = sd_pipe([prompt] ,generator=A_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type='np' ) A = output.images A = torch.Generator(device=A_ ).manual_seed(0 ) A = sd_pipe( [prompt] ,generator=A_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type='np' ,return_dict=A_ ,)[0] A = image[0, -3:, -3:, -1] A = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A = np.array([0.57_56, 0.61_18, 0.50_05, 0.50_41, 0.54_71, 0.47_26, 0.49_76, 0.48_65, 0.48_64] ) 
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.dummy_cond_unet A = PNDMScheduler(skip_prk_steps=A_ ) A = self.dummy_vae A = self.dummy_text_encoder A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) # make sure here that pndm scheduler skips prk A = StableDiffusionPipeline( unet=A_ ,scheduler=A_ ,vae=A_ ,text_encoder=A_ ,tokenizer=A_ ,safety_checker=A_ ,feature_extractor=self.dummy_extractor ,) A = sd_pipe.to(A_ ) sd_pipe.set_progress_bar_config(disable=A_ ) A = 'A painting of a squirrel eating a burger' A = torch.Generator(device=A_ ).manual_seed(0 ) A = sd_pipe([prompt] ,generator=A_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type='np' ) A = output.images A = torch.Generator(device=A_ ).manual_seed(0 ) A = sd_pipe( [prompt] ,generator=A_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type='np' ,return_dict=A_ ,)[0] A = image[0, -3:, -3:, -1] A = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: A = StableDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-lms-pipe' ,safety_checker=A_ ) assert isinstance(A_ ,A_ ) assert isinstance(pipe.scheduler ,A_ ) assert pipe.safety_checker is None A = pipe('example prompt' ,num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(A_ ) A = StableDiffusionPipeline.from_pretrained(A_ ) # sanity check that the pipeline still works assert pipe.safety_checker is None A = pipe('example prompt' ,num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != 'cuda' ,'This test requires a GPU' ) def _SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: A = self.dummy_cond_unet A = PNDMScheduler(skip_prk_steps=A_ ) A = self.dummy_vae A = self.dummy_text_encoder A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) # put models in fp16 A = unet.half() A = vae.half() A = bert.half() # make sure here that pndm scheduler skips prk A = StableDiffusionPipeline( unet=A_ ,scheduler=A_ ,vae=A_ ,text_encoder=A_ ,tokenizer=A_ ,safety_checker=A_ ,feature_extractor=self.dummy_extractor ,) A = sd_pipe.to(A_ ) sd_pipe.set_progress_bar_config(disable=A_ ) A = 'A painting of a squirrel eating a burger' A = sd_pipe([prompt] ,num_inference_steps=2 ,output_type='np' ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: A = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' ,safety_checker=A_ ) A = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) A = sd_pipe.to(A_ ) sd_pipe.set_progress_bar_config(disable=A_ ) A = ( 'portrait of girl with smokey eyes makeup in 
abandoned hotel, grange clothes, redshift, wide high angle' ' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with' ' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and' ' children from bahnhof zoo, detailed ' ) A = 40_0366_0346 A = 7 # without safety guidance (sld_guidance_scale = 0) A = torch.manual_seed(A_ ) A = sd_pipe( [prompt] ,generator=A_ ,guidance_scale=A_ ,num_inference_steps=50 ,output_type='np' ,width=512 ,height=512 ,sld_guidance_scale=0 ,) A = output.images A = image[0, -3:, -3:, -1] A = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 # without safety guidance (strong configuration) A = torch.manual_seed(A_ ) A = sd_pipe( [prompt] ,generator=A_ ,guidance_scale=A_ ,num_inference_steps=50 ,output_type='np' ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_25 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,) A = output.images A = image[0, -3:, -3:, -1] A = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: A = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' ,safety_checker=A_ ) A = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) A = sd_pipe.to(A_ ) sd_pipe.set_progress_bar_config(disable=A_ ) A = 'padme amidala taking a bath artwork, safe for work, no nudity' A = 27_3497_1755 A = 7 A = torch.manual_seed(A_ ) A = sd_pipe( [prompt] ,generator=A_ ,guidance_scale=A_ ,num_inference_steps=50 ,output_type='np' ,width=512 ,height=512 ,sld_guidance_scale=0 ,) A = output.images A = image[0, -3:, -3:, -1] A = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 A = torch.manual_seed(A_ ) A = sd_pipe( [prompt] ,generator=A_ ,guidance_scale=A_ ,num_inference_steps=50 ,output_type='np' ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_25 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,) A = output.images A = image[0, -3:, -3:, -1] A = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict: A = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' ) A = sd_pipe.to(A_ ) sd_pipe.set_progress_bar_config(disable=A_ ) A = ( 'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.' 
' leyendecker' ) A = 10_4435_5234 A = 12 A = torch.manual_seed(A_ ) A = sd_pipe( [prompt] ,generator=A_ ,guidance_scale=A_ ,num_inference_steps=50 ,output_type='np' ,width=512 ,height=512 ,sld_guidance_scale=0 ,) A = output.images A = image[0, -3:, -3:, -1] A = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7 A = torch.manual_seed(A_ ) A = sd_pipe( [prompt] ,generator=A_ ,guidance_scale=A_ ,num_inference_steps=50 ,output_type='np' ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_25 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,) A = output.images A = image[0, -3:, -3:, -1] A = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
711
"""simple docstring""" from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: torch.FloatTensor class lowerCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : List[str] ,A_ : Dict=3 ,A_ : int=3 ,A_ : str=("DownEncoderBlock2D",) ,A_ : Dict=(64,) ,A_ : str=2 ,A_ : Union[str, Any]=32 ,A_ : Optional[int]="silu" ,A_ : str=True ,) -> Union[str, Any]: super().__init__() A = layers_per_block A = torch.nn.Convad( A_ ,block_out_channels[0] ,kernel_size=3 ,stride=1 ,padding=1 ,) A = None A = nn.ModuleList([] ) # down A = block_out_channels[0] for i, down_block_type in enumerate(A_ ): A = output_channel A = block_out_channels[i] A = i == len(A_ ) - 1 A = get_down_block( A_ ,num_layers=self.layers_per_block ,in_channels=A_ ,out_channels=A_ ,add_downsample=not is_final_block ,resnet_eps=1e-6 ,downsample_padding=0 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,) self.down_blocks.append(A_ ) # mid A = UNetMidBlockaD( in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,) # out A = nn.GroupNorm(num_channels=block_out_channels[-1] ,num_groups=A_ ,eps=1e-6 ) A = nn.SiLU() A = 2 * out_channels if double_z else out_channels A = nn.Convad(block_out_channels[-1] ,A_ ,3 ,padding=1 ) A = False def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]: A = x A = self.conv_in(A_ ) if self.training and self.gradient_checkpointing: def create_custom_forward(A_ : Dict ): def custom_forward(*A_ : Tuple ): return module(*A_ ) return custom_forward # down if is_torch_version('>=' ,'1.11.0' ): for down_block in self.down_blocks: A = torch.utils.checkpoint.checkpoint( create_custom_forward(A_ ) ,A_ ,use_reentrant=A_ ) # middle A = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) ,A_ ,use_reentrant=A_ ) else: for down_block in self.down_blocks: A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ ) # middle A = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) ,A_ ) else: # down for down_block in self.down_blocks: A = down_block(A_ ) # middle A = self.mid_block(A_ ) # post-process A = self.conv_norm_out(A_ ) A = self.conv_act(A_ ) A = self.conv_out(A_ ) return sample class lowerCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : List[Any] ,A_ : Optional[Any]=3 ,A_ : Optional[int]=3 ,A_ : str=("UpDecoderBlock2D",) ,A_ : Any=(64,) ,A_ : Optional[int]=2 ,A_ : Optional[int]=32 ,A_ : Tuple="silu" ,A_ : Optional[int]="group" ,) -> Any: super().__init__() A = layers_per_block A = nn.Convad( A_ ,block_out_channels[-1] ,kernel_size=3 ,stride=1 ,padding=1 ,) A = None A = nn.ModuleList([] ) A = in_channels if norm_type == 'spatial' else None # mid A = UNetMidBlockaD( in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' if norm_type == 'group' else norm_type ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,) # up A = list(reversed(A_ ) ) A = reversed_block_out_channels[0] for i, up_block_type 
in enumerate(A_ ): A = output_channel A = reversed_block_out_channels[i] A = i == len(A_ ) - 1 A = get_up_block( A_ ,num_layers=self.layers_per_block + 1 ,in_channels=A_ ,out_channels=A_ ,prev_output_channel=A_ ,add_upsample=not is_final_block ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,resnet_time_scale_shift=A_ ,) self.up_blocks.append(A_ ) A = output_channel # out if norm_type == "spatial": A = SpatialNorm(block_out_channels[0] ,A_ ) else: A = nn.GroupNorm(num_channels=block_out_channels[0] ,num_groups=A_ ,eps=1e-6 ) A = nn.SiLU() A = nn.Convad(block_out_channels[0] ,A_ ,3 ,padding=1 ) A = False def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Union[str, Any]=None ) -> Any: A = z A = self.conv_in(A_ ) A = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(A_ : List[Any] ): def custom_forward(*A_ : Tuple ): return module(*A_ ) return custom_forward if is_torch_version('>=' ,'1.11.0' ): # middle A = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) ,A_ ,A_ ,use_reentrant=A_ ) A = sample.to(A_ ) # up for up_block in self.up_blocks: A = torch.utils.checkpoint.checkpoint( create_custom_forward(A_ ) ,A_ ,A_ ,use_reentrant=A_ ) else: # middle A = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) ,A_ ,A_ ) A = sample.to(A_ ) # up for up_block in self.up_blocks: A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ ,A_ ) else: # middle A = self.mid_block(A_ ,A_ ) A = sample.to(A_ ) # up for up_block in self.up_blocks: A = up_block(A_ ,A_ ) # post-process if latent_embeds is None: A = self.conv_norm_out(A_ ) else: A = self.conv_norm_out(A_ ,A_ ) A = self.conv_act(A_ ) A = self.conv_out(A_ ) return sample class lowerCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[int] ,A_ : Optional[int] ,A_ : Any ,A_ : str ,A_ : Dict=None ,A_ : List[Any]="random" ,A_ : Optional[int]=False ,A_ : str=True ) -> List[str]: super().__init__() A = n_e A = vq_embed_dim A = beta A = legacy A = nn.Embedding(self.n_e ,self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e ,1.0 / self.n_e ) A = remap if self.remap is not None: self.register_buffer('used' ,torch.tensor(np.load(self.remap ) ) ) A = self.used.shape[0] A = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": A = self.re_embed A = self.re_embed + 1 print( F'Remapping {self.n_e} indices to {self.re_embed} indices. ' F'Using {self.unknown_index} for unknown indices.' 
) else: A = n_e A = sane_index_shape def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ) -> Any: A = inds.shape assert len(A_ ) > 1 A = inds.reshape(ishape[0] ,-1 ) A = self.used.to(A_ ) A = (inds[:, :, None] == used[None, None, ...]).long() A = match.argmax(-1 ) A = match.sum(2 ) < 1 if self.unknown_index == "random": A = torch.randint(0 ,self.re_embed ,size=new[unknown].shape ).to(device=new.device ) else: A = self.unknown_index return new.reshape(A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ) -> List[Any]: A = inds.shape assert len(A_ ) > 1 A = inds.reshape(ishape[0] ,-1 ) A = self.used.to(A_ ) if self.re_embed > self.used.shape[0]: # extra token A = 0 # simply set to zero A = torch.gather(used[None, :][inds.shape[0] * [0], :] ,1 ,A_ ) return back.reshape(A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[Any] ) -> str: # reshape z -> (batch, height, width, channel) and flatten A = z.permute(0 ,2 ,3 ,1 ).contiguous() A = z.view(-1 ,self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z A = torch.argmin(torch.cdist(A_ ,self.embedding.weight ) ,dim=1 ) A = self.embedding(A_ ).view(z.shape ) A = None A = None # compute loss for embedding if not self.legacy: A = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: A = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients A = z + (z_q - z).detach() # reshape back to match original input shape A = z_q.permute(0 ,3 ,1 ,2 ).contiguous() if self.remap is not None: A = min_encoding_indices.reshape(z.shape[0] ,-1 ) # add batch axis A = self.remap_to_used(A_ ) A = min_encoding_indices.reshape(-1 ,1 ) # flatten if self.sane_index_shape: A = min_encoding_indices.reshape(z_q.shape[0] ,z_q.shape[2] ,z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : str ) -> Union[str, Any]: # shape specifying (batch, height, width, channel) if self.remap is not None: A = indices.reshape(shape[0] ,-1 ) # add batch axis A = self.unmap_to_all(A_ ) A = indices.reshape(-1 ) # flatten again # get quantized latent vectors A = self.embedding(A_ ) if shape is not None: A = z_q.view(A_ ) # reshape back to match original input shape A = z_q.permute(0 ,3 ,1 ,2 ).contiguous() return z_q class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : str ,A_ : Tuple ,A_ : Dict=False ) -> List[str]: A = parameters A , A = torch.chunk(A_ ,2 ,dim=1 ) A = torch.clamp(self.logvar ,-30.0 ,20.0 ) A = deterministic A = torch.exp(0.5 * self.logvar ) A = torch.exp(self.logvar ) if self.deterministic: A = A = torch.zeros_like( self.mean ,device=self.parameters.device ,dtype=self.parameters.dtype ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[torch.Generator] = None ) -> torch.FloatTensor: # make sure sample is on the same device as the parameters and has same dtype A = randn_tensor( self.mean.shape ,generator=A_ ,device=self.parameters.device ,dtype=self.parameters.dtype ) A = self.mean + self.std * sample return x def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple=None ) -> int: if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean ,2 ) + self.var - 1.0 - self.logvar ,dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean ,2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar ,dim=[1, 2, 3] 
,) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[str] ,A_ : Union[str, Any]=[1, 2, 3] ) -> List[str]: if self.deterministic: return torch.Tensor([0.0] ) A = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean ,2 ) / self.var ,dim=A_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: return self.mean
22
0
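The DiagonalGaussianDistribution in this row's context code uses the closed-form KL divergence against a standard normal, 0.5 · Σ(μ² + σ² − 1 − log σ²). A small numpy sketch cross-checking that formula with a Monte-Carlo estimate of E_q[log q − log p] (variable names here are illustrative):

import numpy as np

rng = np.random.default_rng(0)
mean = rng.normal(size=4)
logvar = np.clip(rng.normal(size=4), -30.0, 20.0)  # same clamp as the module
var = np.exp(logvar)

# closed form, as computed in the kl() method above
kl = 0.5 * np.sum(mean**2 + var - 1.0 - logvar)

# Monte-Carlo cross-check of the same quantity
z = mean + np.sqrt(var) * rng.normal(size=(200_000, 4))
log_q = -0.5 * np.sum(np.log(2 * np.pi) + logvar + (z - mean) ** 2 / var, axis=1)
log_p = -0.5 * np.sum(np.log(2 * np.pi) + z**2, axis=1)
print(kl, (log_q - log_p).mean())  # the two estimates should be close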
"""simple docstring""" import argparse import os import torch from transformers.utils import WEIGHTS_NAME _lowercase = ['''small''', '''medium''', '''large'''] _lowercase = '''lm_head.decoder.weight''' _lowercase = '''lm_head.weight''' def _snake_case ( snake_case__ : str , snake_case__ : str ): A = torch.load(snake_case__ ) A = d.pop(snake_case__ ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) torch.save(snake_case__ , os.path.join(snake_case__ , snake_case__ ) ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument('''--dialogpt_path''', default='''.''', type=str) _lowercase = parser.parse_args() for MODEL in DIALOGPT_MODELS: _lowercase = os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""") _lowercase = F"""./DialoGPT-{MODEL}""" convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
712
"""simple docstring""" def _snake_case ( snake_case__ : list , snake_case__ : list , snake_case__ : int ): A = len(snake_case__ ) A = [[0] * n for i in range(snake_case__ )] for i in range(snake_case__ ): A = y_points[i] for i in range(2 , snake_case__ ): for j in range(snake_case__ , snake_case__ ): A = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
22
0
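As a sanity check of the corrected Neville scheme above: the interpolating polynomial built from samples of y = x² must reproduce x0² at any query point covered by the data. A self-contained sketch (the function body is repeated so the snippet runs standalone):

def neville_interpolate(x_points: list, y_points: list, x0: float) -> list:
    n = len(x_points)
    q = [[0.0] * n for _ in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            # combine two lower-order interpolants sharing all but one node
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]

x_points = [1, 2, 3, 4, 5]
y_points = [x * x for x in x_points]
value, _ = neville_interpolate(x_points, y_points, 2.5)
assert abs(value - 6.25) < 1e-9
print(value)  # -> 6.25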
import math


def jump_search(arr: list, x: int) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    x = int(input('Enter the number to be searched:\n'))
    res = jump_search(arr, x)
    if res == -1:
        print('Number not found!')
    else:
        print(f"Number {x} is at index {res}")
713
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] ,A_ : Optional[Any] ,A_ : Optional[int]=2 ,A_ : Any=True ,A_ : List[str]=False ,A_ : Tuple=10 ,A_ : List[Any]=3 ,A_ : Any=32 * 8 ,A_ : Dict=32 * 8 ,A_ : List[Any]=4 ,A_ : Tuple=64 ,) -> List[str]: A = parent A = batch_size A = is_training A = use_auxiliary_loss A = num_queries A = num_channels A = min_size A = max_size A = num_labels A = hidden_dim A = hidden_dim def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]: A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( A_ ) A = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=A_ ) A = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=A_ ) > 0.5 ).float() A = (torch.rand((self.batch_size, self.num_labels) ,device=A_ ) > 0.5).long() A = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]: A = MaskaFormerConfig( hidden_size=self.hidden_dim ,) A = self.num_queries A = self.num_labels A = [1, 1, 1, 1] A = self.num_channels A = 64 A = 128 A = self.hidden_dim A = self.hidden_dim A = self.hidden_dim return config def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A , A , A , A , A = self.prepare_config_and_inputs() A = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask} return config, inputs_dict def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]: A = output.encoder_hidden_states A = output.pixel_decoder_hidden_states A = output.transformer_decoder_hidden_states self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(A_ ) ,config.decoder_layers ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : List[str] ,A_ : Union[str, Any]=False ) -> str: with torch.no_grad(): A = MaskaFormerModel(config=A_ ) model.to(A_ ) model.eval() A = model(pixel_values=A_ ,pixel_mask=A_ ) A = model(A_ ,output_hidden_states=A_ ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : Any ,A_ : Dict ,A_ : Any ,A_ : Dict ) -> Optional[Any]: A = MaskaFormerForUniversalSegmentation(config=A_ ) model.to(A_ ) model.eval() def 
comm_check_on_output(A_ : str ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): A = model(pixel_values=A_ ,pixel_mask=A_ ) A = model(A_ ) comm_check_on_output(A_ ) A = model( pixel_values=A_ ,pixel_mask=A_ ,mask_labels=A_ ,class_labels=A_ ) comm_check_on_output(A_ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) ) @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Union[str, Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () _lowerCamelCase: Optional[Any] = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {} _lowerCamelCase: int = False _lowerCamelCase: Dict = False _lowerCamelCase: List[str] = False _lowerCamelCase: int = False def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict: A = MaskaFormerModelTester(self ) A = ConfigTester(self ,config_class=A_ ,has_text_modality=A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A , A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*A_ ) @unittest.skip(reason='Mask2Former does not use inputs_embeds' ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: pass @unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: pass @unittest.skip(reason='Mask2Former is not a generative model' ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: pass @unittest.skip(reason='Mask2Former does not use token embeddings' ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: pass @require_torch_multi_gpu @unittest.skip( reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: pass def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ) A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A = [*signature.parameters.keys()] A = ['pixel_values'] self.assertListEqual(arg_names[:1] ,A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: for model_name in ["facebook/mask2former-swin-small-coco-instance"]: A = MaskaFormerModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = (self.model_tester.min_size,) * 2 A = { 'pixel_values': torch.randn((2, 3, *size) ,device=A_ ), 'mask_labels': torch.randn((2, 10, *size) ,device=A_ ), 'class_labels': torch.zeros(2 ,10 ,device=A_ ).long(), } A = self.model_tester.get_config() A = MaskaFormerForUniversalSegmentation(A_ ).to(A_ ) A = model(**A_ ) self.assertTrue(outputs.loss is not None ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ).to(A_ ) A = model(**A_ ,output_attentions=A_ ) self.assertTrue(outputs.attentions is not None ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: if not self.model_tester.is_training: return A = self.all_model_classes[1] A , A , A , A , A = self.model_tester.prepare_config_and_inputs() A = model_class(A_ ) model.to(A_ ) model.train() A = model(A_ ,mask_labels=A_ ,class_labels=A_ ).loss loss.backward() def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = self.all_model_classes[1] A , A , A , A , A = self.model_tester.prepare_config_and_inputs() A = True A = True A = model_class(A_ ).to(A_ ) model.train() A = model(A_ ,mask_labels=A_ ,class_labels=A_ ) A = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() A = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() A = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() A = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=A_ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) _lowercase = 1e-4 def _snake_case ( ): A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_vision @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: return "facebook/mask2former-swin-small-coco-instance" @cached_property def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: A = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(A_ ) A = self.default_image_processor A = prepare_img() A = image_processor(A_ ,return_tensors='pt' ).to(A_ ) A = inputs['pixel_values'].shape # check size is divisible by 32 
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(A_ ,(1, 3, 384, 384) ) with torch.no_grad(): A = model(**A_ ) A = torch.tensor( [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(A_ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) ) A = torch.tensor( [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(A_ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) ) A = torch.tensor( [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(A_ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,A_ ,atol=A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval() A = self.default_image_processor A = prepare_img() A = image_processor(A_ ,return_tensors='pt' ).to(A_ ) A = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(A_ ,(1, 3, 384, 384) ) with torch.no_grad(): A = model(**A_ ) # masks_queries_logits A = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) A = [ [-8.78_39, -9.00_56, -8.81_21], [-7.41_04, -7.03_13, -6.54_01], [-6.61_05, -6.34_27, -6.46_75], ] A = torch.tensor(A_ ).to(A_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,A_ ,atol=A_ ) ) # class_queries_logits A = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) ) A = torch.tensor( [ [1.83_24, -8.08_35, -4.19_22], [0.84_50, -9.00_50, -3.60_53], [0.30_45, -7.72_93, -3.02_75], ] ).to(A_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,A_ ,atol=A_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval() A = self.default_image_processor A = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors='pt' ,) A = inputs['pixel_values'].to(A_ ) A = [el.to(A_ ) for el in inputs['mask_labels']] A = [el.to(A_ ) for el in inputs['class_labels']] with torch.no_grad(): A = model(**A_ ) self.assertTrue(outputs.loss is not None )
22
0
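A tiny usage sketch of jump search as fixed above: with block size ⌊√n⌋ the forward jumps and the linear back-scan each cost O(√n) comparisons on a sorted array. The function body is restated so the snippet runs standalone:

import math

def jump_search(arr: list, x: int) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    # jump forward in sqrt(n)-sized blocks until the block may contain x
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    # linear scan inside the candidate block
    while arr[prev] < x:
        prev += 1
        if prev == min(step, n):
            return -1
    return prev if arr[prev] == x else -1

data = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
assert jump_search(data, 55) == 10
assert jump_search(data, 4) == -1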
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    housing = fetch_california_housing()
    data, target = data_handling(housing)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
714
"""simple docstring""" from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] ,A_ : Optional[Any] ,) -> Optional[int]: A = parent A = 13 A = 7 A = True A = True A = True A = 99 A = 32 A = 2 A = 4 A = 37 A = 'gelu' A = 0.1 A = 0.1 A = 512 A = 16 A = 2 A = 0.02 A = 3 A = 4 A = None def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = None if self.use_input_mask: A = random_attention_mask([self.batch_size, self.seq_length] ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A = ids_tensor([self.batch_size] ,self.num_choices ) A = EsmConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,pad_token_id=1 ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = self.prepare_config_and_inputs() A = True A = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : List[Any] ,A_ : Any ,A_ : Any ) -> Dict: A = TFEsmModel(config=A_ ) A = {'input_ids': input_ids, 'attention_mask': input_mask} A = model(A_ ) A = [input_ids, input_mask] A = model(A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : List[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,) -> Optional[int]: A = True A = TFEsmModel(config=A_ ) A = { 'input_ids': input_ids, 'attention_mask': input_mask, 'encoder_hidden_states': encoder_hidden_states, 'encoder_attention_mask': encoder_attention_mask, } A = model(A_ ) A = [input_ids, input_mask] A = model(A_ ,encoder_hidden_states=A_ ) # Also check the case where encoder outputs are not passed A = model(A_ ,attention_mask=A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[Any] 
,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[Any] ) -> Dict: A = TFEsmForMaskedLM(config=A_ ) A = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[Any] ,A_ : int ,A_ : Tuple ,A_ : Optional[int] ) -> Union[str, Any]: A = self.num_labels A = TFEsmForTokenClassification(config=A_ ) A = {'input_ids': input_ids, 'attention_mask': input_mask} A = model(A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) _lowerCamelCase: List[str] = ( { '''feature-extraction''': TFEsmModel, '''fill-mask''': TFEsmForMaskedLM, '''text-classification''': TFEsmForSequenceClassification, '''token-classification''': TFEsmForTokenClassification, '''zero-shot''': TFEsmForSequenceClassification, } if is_tf_available() else {} ) _lowerCamelCase: Union[str, Any] = False _lowerCamelCase: List[Any] = False def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: A = TFEsmModelTester(self ) A = ConfigTester(self ,config_class=A_ ,hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = TFEsmModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) @unittest.skip('Protein models do not support embedding resizing.' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: pass @unittest.skip('Protein models do not support embedding resizing.' 
) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any: pass def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ) assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer A = model.get_bias() assert isinstance(A_ ,A_ ) for k, v in name.items(): assert isinstance(A_ ,tf.Variable ) else: A = model.get_output_embeddings() assert x is None A = model.get_bias() assert name is None @require_tf class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) A = tf.constant([[0, 1, 2, 3, 4, 5]] ) A = model(A_ )[0] A = [1, 6, 33] self.assertEqual(list(output.numpy().shape ) ,A_ ) # compare the actual values for a slice. A = tf.constant( [ [ [8.92_15_18, -10.58_98_14, -6.4_67_13_07], [-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15], [-7.78_12_47, -13.95_15_57, -3.74_05_92], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-2 ) ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: A = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) A = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) A = model(A_ )[0] # compare the actual values for a slice. A = tf.constant( [ [ [0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39], [0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22], [0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
22
0
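The XGBoost row above is a standard fit/predict/score pipeline, and the same flow works with any sklearn-compatible regressor. A reduced sketch using scikit-learn's GradientBoostingRegressor so it runs without xgboost installed (swap XGBRegressor back in for the original behaviour):

from sklearn.datasets import fetch_california_housing
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split

# same dataset and split as the original script
data = fetch_california_housing()
x_train, x_test, y_train, y_test = train_test_split(
    data["data"], data["target"], test_size=0.25, random_state=1
)
model = GradientBoostingRegressor(random_state=42)
model.fit(x_train, y_train)
print("MAE:", mean_absolute_error(y_test, model.predict(x_test)))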
"""simple docstring""" import re import subprocess import sys _lowercase = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''') _lowercase = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('''utf-8''').split() _lowercase = '''|'''.join(sys.argv[1:]) _lowercase = re.compile(rF"""^({joined_dirs}).*?\.py$""") _lowercase = [x for x in modified_files if regex.match(x)] print(''' '''.join(relevant_modified_files), end='''''')
715
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys _lowercase = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''') _lowercase = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('''utf-8''').split() _lowercase = '''|'''.join(sys.argv[1:]) _lowercase = re.compile(rF"""^({joined_dirs}).*?\.py$""") _lowercase = [x for x in modified_files if regex.match(x)] print(''' '''.join(relevant_modified_files), end='''''')
22
0
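The core of the get_modified_files script above is the regex filter over git's changed-path list; here is that step in isolation with hypothetical inputs, so the pattern's behaviour is easy to see:

import re

# hypothetical stand-ins for the git output and CLI arguments
modified_files = ["src/transformers/utils.py", "docs/index.md", "tests/test_x.py"]
dirs = ["src", "tests"]

# keep only .py files that live under one of the requested top-level dirs
regex = re.compile(rf"^({'|'.join(dirs)}).*?\.py$")
print([f for f in modified_files if regex.match(f)])
# -> ['src/transformers/utils.py', 'tests/test_x.py']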
"""simple docstring""" def _snake_case ( snake_case__ : str ): A = [int(snake_case__ ) for i in ip_va_address.split('.' ) if i.isdigit()] return len(snake_case__ ) == 4 and all(0 <= int(snake_case__ ) <= 254 for octet in octets ) if __name__ == "__main__": _lowercase = input().strip() _lowercase = '''valid''' if is_ip_va_address_valid(ip) else '''invalid''' print(F"""{ip} is a {valid_or_invalid} IP v4 address.""")
716
"""simple docstring""" import sys from collections import defaultdict class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] ) -> int: A = [] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : int ) -> Optional[int]: return self.node_position[vertex] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[Any] ,A_ : Any ) -> List[Any]: A = pos def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : str ,A_ : Dict ,A_ : List[str] ) -> str: if start > size // 2 - 1: return else: if 2 * start + 2 >= size: A = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: A = 2 * start + 1 else: A = 2 * start + 2 if heap[smallest_child] < heap[start]: A , A = heap[smallest_child], positions[smallest_child] A , A = ( heap[start], positions[start], ) A , A = temp, tempa A = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] ,self.get_position(positions[start] ) ) self.set_position(positions[start] ,A_ ) self.top_to_bottom(A_ ,A_ ,A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Optional[int] ,A_ : Dict ,A_ : str ,A_ : Union[str, Any] ) -> Dict: A = position[index] while index != 0: A = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: A = heap[parent] A = position[parent] self.set_position(position[parent] ,A_ ) else: A = val A = temp self.set_position(A_ ,A_ ) break A = parent else: A = val A = temp self.set_position(A_ ,0 ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple ,A_ : Dict ) -> Union[str, Any]: A = len(A_ ) // 2 - 1 for i in range(A_ ,-1 ,-1 ): self.top_to_bottom(A_ ,A_ ,len(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ,A_ : Dict ) -> Union[str, Any]: A = positions[0] A = sys.maxsize self.top_to_bottom(A_ ,0 ,len(A_ ) ,A_ ) return temp def _snake_case ( snake_case__ : Dict ): A = Heap() A = [0] * len(snake_case__ ) A = [-1] * len(snake_case__ ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph A = [] # Heap of Distance of vertices from their neighboring vertex A = [] for vertex in range(len(snake_case__ ) ): distance_tv.append(sys.maxsize ) positions.append(snake_case__ ) heap.node_position.append(snake_case__ ) A = [] A = 1 A = sys.maxsize for neighbor, distance in adjacency_list[0]: A = 0 A = distance heap.heapify(snake_case__ , snake_case__ ) for _ in range(1 , len(snake_case__ ) ): A = heap.delete_minimum(snake_case__ , snake_case__ ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) A = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(snake_case__ )] ): A = distance heap.bottom_to_top( snake_case__ , heap.get_position(snake_case__ ) , snake_case__ , snake_case__ ) A = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > _lowercase = int(input('''Enter number of edges: ''').strip()) _lowercase = defaultdict(list) for _ in range(edges_number): _lowercase = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
22
0
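# A minimal sketch of the octet rule the IPv4 validator above implements; the
# function and variable names here are illustrative, not taken from the source.
def is_ip_v4_address_valid(ip: str) -> bool:
    octets = [int(part) for part in ip.split(".") if part.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)

assert is_ip_v4_address_valid("192.168.0.23")
assert not is_ip_v4_address_valid("192.168.256.1")  # octet out of range
assert not is_ip_v4_address_valid("172.16.0.8.7")   # too many octets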
"""simple docstring""" def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : float , ): A = [redshift, radiation_density, matter_density, dark_energy] if any(p < 0 for p in parameters ): raise ValueError('All input parameters must be positive' ) if any(p > 1 for p in parameters[1:4] ): raise ValueError('Relative densities cannot be greater than one' ) else: A = 1 - (matter_density + radiation_density + dark_energy) A = ( radiation_density * (redshift + 1) ** 4 + matter_density * (redshift + 1) ** 3 + curvature * (redshift + 1) ** 2 + dark_energy ) A = hubble_constant * e_a ** (1 / 2) return hubble if __name__ == "__main__": import doctest # run doctest doctest.testmod() # demo LCDM approximation _lowercase = 0.3 print( hubble_parameter( hubble_constant=68.3, radiation_density=1e-4, matter_density=matter_density, dark_energy=1 - matter_density, redshift=0, ) )
717
"""simple docstring""" import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter _lowercase = True except ImportError: _lowercase = False _lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name def _snake_case ( snake_case__ : Namespace ): return AddNewModelCommand(args.testing , args.testing_file , path=args.path ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any: A = parser.add_parser('add-new-model' ) add_new_model_parser.add_argument('--testing' ,action='store_true' ,help='If in testing mode.' ) add_new_model_parser.add_argument('--testing_file' ,type=A_ ,help='Configuration file on which to run.' ) add_new_model_parser.add_argument( '--path' ,type=A_ ,help='Path to cookiecutter. Should only be used for testing purposes.' ) add_new_model_parser.set_defaults(func=A_ ) def __init__( self : Tuple ,A_ : bool ,A_ : str ,A_ : Tuple=None ,*A_ : List[str] ) -> Union[str, Any]: A = testing A = testing_file A = path def _SCREAMING_SNAKE_CASE ( self : int ) -> int: warnings.warn( 'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. ' 'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality ' 'checks, you should use `transformers-cli add-new-model-like` instead.' ) if not _has_cookiecutter: raise ImportError( 'Model creation dependencies are required to use the `add_new_model` command. Install them by running ' 'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' ) # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory A = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]] if len(A_ ) > 0: raise ValueError( 'Several directories starting with `cookiecutter-template-` in current working directory. ' 'Please clean your directory by removing all folders starting with `cookiecutter-template-` or ' 'change your working directory.' 
) A = ( Path(A_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent ) A = path_to_transformer_root / 'templates' / 'adding_a_new_model' # Execute cookiecutter if not self._testing: cookiecutter(str(A_ ) ) else: with open(self._testing_file ,'r' ) as configuration_file: A = json.load(A_ ) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=A_ ,extra_context=A_ ,) A = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0] # Retrieve configuration with open(directory + '/configuration.json' ,'r' ) as configuration_file: A = json.load(A_ ) A = configuration['lowercase_modelname'] A = configuration['generate_tensorflow_pytorch_and_flax'] os.remove(F'{directory}/configuration.json' ) A = 'PyTorch' in generate_tensorflow_pytorch_and_flax A = 'TensorFlow' in generate_tensorflow_pytorch_and_flax A = 'Flax' in generate_tensorflow_pytorch_and_flax A = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}' os.makedirs(A_ ,exist_ok=A_ ) os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=A_ ) # Tests require submodules as they have parent imports with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,'w' ): pass shutil.move( F'{directory}/__init__.py' ,F'{model_dir}/__init__.py' ,) shutil.move( F'{directory}/configuration_{lowercase_model_name}.py' ,F'{model_dir}/configuration_{lowercase_model_name}.py' ,) def remove_copy_lines(A_ : int ): with open(A_ ,'r' ) as f: A = f.readlines() with open(A_ ,'w' ) as f: for line in lines: if "# Copied from transformers." not in line: f.write(A_ ) if output_pytorch: if not self._testing: remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' ) shutil.move( F'{directory}/modeling_{lowercase_model_name}.py' ,F'{model_dir}/modeling_{lowercase_model_name}.py' ,) shutil.move( F'{directory}/test_modeling_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,) else: os.remove(F'{directory}/modeling_{lowercase_model_name}.py' ) os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' ) if output_tensorflow: if not self._testing: remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' ) shutil.move( F'{directory}/modeling_tf_{lowercase_model_name}.py' ,F'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,) shutil.move( F'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,) else: os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' ) os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' ) if output_flax: if not self._testing: remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' ) shutil.move( F'{directory}/modeling_flax_{lowercase_model_name}.py' ,F'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,) shutil.move( F'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,) else: os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' ) os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' ) shutil.move( F'{directory}/{lowercase_model_name}.md' ,F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,) shutil.move( 
F'{directory}/tokenization_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}.py' ,) shutil.move( F'{directory}/tokenization_fast_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(A_ : str ,A_ : str ,A_ : List[str] ): # Create temp file A , A = mkstemp() A = False with fdopen(A_ ,'w' ) as new_file: with open(A_ ) as old_file: for line in old_file: new_file.write(A_ ) if line_to_copy_below in line: A = True for line_to_copy in lines_to_copy: new_file.write(A_ ) if not line_found: raise ValueError(F'Line {line_to_copy_below} was not found in file.' ) # Copy the file permissions from the old file to the new file copymode(A_ ,A_ ) # Remove original file remove(A_ ) # Move new file move(A_ ,A_ ) def skip_units(A_ : Dict ): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(A_ : Tuple ): with open(A_ ) as datafile: A = [] A = False A = False for line in datafile: if "# To replace in: " in line and "##" not in line: A = line.split('"' )[1] A = skip_units(A_ ) elif "# Below: " in line and "##" not in line: A = line.split('"' )[1] A = skip_units(A_ ) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(A_ ,A_ ,A_ ) A = [] elif "# Replace with" in line and "##" not in line: A = [] elif "##" not in line: lines_to_copy.append(A_ ) remove(A_ ) replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' ) os.rmdir(A_ )
22
0
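# A quick numeric check of the Friedmann relation used in hubble_parameter
# above: for a flat universe the density terms sum to 1 at z = 0, so H(0)
# must equal H0. The helper restates the formula with illustrative values.
import math

def h_of_z(h0, omega_r, omega_m, omega_l, z):
    omega_k = 1.0 - (omega_r + omega_m + omega_l)  # curvature closes the budget
    e_2 = (
        omega_r * (z + 1) ** 4
        + omega_m * (z + 1) ** 3
        + omega_k * (z + 1) ** 2
        + omega_l
    )
    return h0 * math.sqrt(e_2)

assert math.isclose(h_of_z(68.3, 1e-4, 0.3, 1 - 0.3 - 1e-4, 0), 68.3)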
"""simple docstring""" import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _lowercase = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''') @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Tuple = PegasusTokenizer _lowerCamelCase: Optional[int] = PegasusTokenizerFast _lowerCamelCase: str = True _lowerCamelCase: List[Any] = True def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: super().setUp() # We have a SentencePiece fixture for testing A = PegasusTokenizer(A_ ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: return PegasusTokenizer.from_pretrained('google/pegasus-large' ) def _SCREAMING_SNAKE_CASE ( self : int ,**A_ : List[Any] ) -> PegasusTokenizer: return PegasusTokenizer.from_pretrained(self.tmpdirname ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : int ) -> str: return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: A = '</s>' A = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) ,A_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> int: A = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,'<pad>' ) self.assertEqual(vocab_keys[1] ,'</s>' ) self.assertEqual(vocab_keys[-1] ,'v' ) self.assertEqual(len(A_ ) ,1103 ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: self.assertEqual(self.get_tokenizer().vocab_size ,1103 ) def _SCREAMING_SNAKE_CASE ( self : str ) -> str: A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) A = self.tokenizer_class.from_pretrained(self.tmpdirname ) A = ( 'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important' ' </s> <pad> <pad> <pad>' ) A = rust_tokenizer([raw_input_str] ,return_tensors=A_ ,add_special_tokens=A_ ).input_ids[0] A = py_tokenizer([raw_input_str] ,return_tensors=A_ ,add_special_tokens=A_ ).input_ids[0] self.assertListEqual(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict: A = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word A = '<mask_1> To ensure a <mask_2> flow of bank resolutions.' A = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1] A = tokenizer([raw_input_str] ,return_tensors=A_ ).input_ids[0] self.assertListEqual(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: A = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 A = 'To ensure a smooth flow of bank resolutions.' 
A = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1] A = tokenizer([raw_input_str] ,return_tensors=A_ ).input_ids[0] self.assertListEqual(A_ ,A_ ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = ['This is going to be way too long.' * 150, 'short example'] A = ['not super long but more than 5 tokens', 'tiny'] A = self._large_tokenizer(A_ ,padding=A_ ,truncation=A_ ,return_tensors='pt' ) A = self._large_tokenizer( text_target=A_ ,max_length=5 ,padding=A_ ,truncation=A_ ,return_tensors='pt' ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(A_ ) == 2 # input_ids, attention_mask. @slow def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: # fmt: off A = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=A_ ,model_name='google/bigbird-pegasus-large-arxiv' ,revision='ba85d0851d708441f91440d509690f1ab6353415' ,) @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = PegasusTokenizer _lowerCamelCase: Dict = PegasusTokenizerFast _lowerCamelCase: Any = True _lowerCamelCase: Dict = True def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: super().setUp() # We have a SentencePiece fixture for testing A = PegasusTokenizer(A_ ,offset=0 ,mask_token_sent=A_ ,mask_token='[MASK]' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def 
_SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]: return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' ) def _SCREAMING_SNAKE_CASE ( self : str ,**A_ : Any ) -> PegasusTokenizer: return PegasusTokenizer.from_pretrained(self.tmpdirname ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Dict ) -> Dict: return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) A = self.tokenizer_class.from_pretrained(self.tmpdirname ) A = ( 'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>' ' <pad> <pad> <pad>' ) A = rust_tokenizer([raw_input_str] ,return_tensors=A_ ,add_special_tokens=A_ ).input_ids[0] A = py_tokenizer([raw_input_str] ,return_tensors=A_ ,add_special_tokens=A_ ).input_ids[0] self.assertListEqual(A_ ,A_ ) @require_torch def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: A = ['This is going to be way too long.' * 1000, 'short example'] A = ['not super long but more than 5 tokens', 'tiny'] A = self._large_tokenizer(A_ ,padding=A_ ,truncation=A_ ,return_tensors='pt' ) A = self._large_tokenizer( text_target=A_ ,max_length=5 ,padding=A_ ,truncation=A_ ,return_tensors='pt' ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(A_ ) == 2 # input_ids, attention_mask. def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: A = ( 'This is an example string that is used to test the original TF implementation against the HF' ' implementation' ) A = self._large_tokenizer(A_ ).input_ids self.assertListEqual( A_ ,[182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] ,)
718
"""simple docstring""" import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : int ,A_ : Tuple ,A_ : str=7 ,A_ : Tuple=3 ,A_ : List[Any]=18 ,A_ : List[str]=30 ,A_ : Optional[Any]=400 ,A_ : Any=True ,A_ : Optional[Any]=None ,A_ : List[str]=True ,) -> str: A = size if size is not None else {'height': 18, 'width': 18} A = parent A = batch_size A = num_channels A = image_size A = min_resolution A = max_resolution A = do_resize A = size A = do_normalize def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04], [-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[Any] = ImageGPTImageProcessor if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = ImageGPTImageProcessingTester(self ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ ,'clusters' ) ) self.assertTrue(hasattr(A_ ,'do_resize' ) ) self.assertTrue(hasattr(A_ ,'size' ) ) self.assertTrue(hasattr(A_ ,'do_normalize' ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} ) A = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ) self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: A = self.image_processing_class(**self.image_processor_dict ) A = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(A_ ,obj[key] ) ) else: self.assertEqual(obj[key] ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: A = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A = os.path.join(A_ ,'image_processor.json' ) image_processor_first.to_json_file(A_ ) A = self.image_processing_class.from_json_file(A_ ).to_dict() A = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: A = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(A_ ) A = 
self.image_processing_class.from_pretrained(A_ ).to_dict() A = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] ,A_ ) @unittest.skip('ImageGPT requires clusters at initialization' ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: pass def _snake_case ( ): A = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' ) A = Image.open(dataset[4]['file'] ) A = Image.open(dataset[5]['file'] ) A = [imagea, imagea] return images @require_vision @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : str ) -> int: A = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' ) A = prepare_images() # test non-batched A = image_processing(images[0] ,return_tensors='pt' ) self.assertIsInstance(encoding.input_ids ,torch.LongTensor ) self.assertEqual(encoding.input_ids.shape ,(1, 1024) ) A = [306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist() ,A_ ) # test batched A = image_processing(A_ ,return_tensors='pt' ) self.assertIsInstance(encoding.input_ids ,torch.LongTensor ) self.assertEqual(encoding.input_ids.shape ,(2, 1024) ) A = [303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() ,A_ )
22
0
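# A small NumPy sketch of the color-cluster quantization that the ImageGPT
# image-processor test above exercises: each pixel maps to the id of its
# nearest cluster. The cluster and pixel values are made up for illustration.
import numpy as np

clusters = np.array([[0.9, 0.7, 0.4], [-0.6, -0.02, 0.5]])  # (n_clusters, 3)
pixels = np.array([[0.8, 0.6, 0.3], [-0.5, 0.0, 0.6]])      # (n_pixels, 3)

# Squared Euclidean distance of every pixel to every cluster, then argmin.
d = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
input_ids = d.argmin(axis=1)
print(input_ids)  # -> [0 1]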
"""simple docstring""" from __future__ import annotations import bisect def _snake_case ( snake_case__ : list[int] , snake_case__ : int , snake_case__ : int = 0 , snake_case__ : int = -1 ): if hi < 0: A = len(snake_case__ ) while lo < hi: A = lo + (hi - lo) // 2 if sorted_collection[mid] < item: A = mid + 1 else: A = mid return lo def _snake_case ( snake_case__ : list[int] , snake_case__ : int , snake_case__ : int = 0 , snake_case__ : int = -1 ): if hi < 0: A = len(snake_case__ ) while lo < hi: A = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: A = mid + 1 else: A = mid return lo def _snake_case ( snake_case__ : list[int] , snake_case__ : int , snake_case__ : int = 0 , snake_case__ : int = -1 ): sorted_collection.insert(bisect_left(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) , snake_case__ ) def _snake_case ( snake_case__ : list[int] , snake_case__ : int , snake_case__ : int = 0 , snake_case__ : int = -1 ): sorted_collection.insert(bisect_right(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) , snake_case__ ) def _snake_case ( snake_case__ : list[int] , snake_case__ : int ): A = 0 A = len(snake_case__ ) - 1 while left <= right: A = left + (right - left) // 2 A = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: A = midpoint - 1 else: A = midpoint + 1 return None def _snake_case ( snake_case__ : list[int] , snake_case__ : int ): A = bisect.bisect_left(snake_case__ , snake_case__ ) if index != len(snake_case__ ) and sorted_collection[index] == item: return index return None def _snake_case ( snake_case__ : list[int] , snake_case__ : int , snake_case__ : int , snake_case__ : int ): if right < left: return None A = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(snake_case__ , snake_case__ , snake_case__ , midpoint - 1 ) else: return binary_search_by_recursion(snake_case__ , snake_case__ , midpoint + 1 , snake_case__ ) if __name__ == "__main__": _lowercase = input('''Enter numbers separated by comma:\n''').strip() _lowercase = sorted(int(item) for item in user_input.split(''',''')) _lowercase = int(input('''Enter a single number to be found in the list:\n''')) _lowercase = binary_search(collection, target) if result is None: print(F"""{target} was not found in {collection}.""") else: print(F"""{target} was found at position {result} in {collection}.""")
719
"""simple docstring""" from argparse import ArgumentParser from . import BaseTransformersCLICommand def _snake_case ( snake_case__ : Optional[int] ): return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any: A = parser.add_parser('download' ) download_parser.add_argument( '--cache-dir' ,type=A_ ,default=A_ ,help='Path to location to store the models' ) download_parser.add_argument( '--force' ,action='store_true' ,help='Force the model to be download even if already in cache-dir' ) download_parser.add_argument( '--trust-remote-code' ,action='store_true' ,help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' ,) download_parser.add_argument('model' ,type=A_ ,help='Name of the model to download' ) download_parser.set_defaults(func=A_ ) def __init__( self : Dict ,A_ : str ,A_ : str ,A_ : bool ,A_ : bool ) -> Union[str, Any]: A = model A = cache A = force A = trust_remote_code def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: from ..models.auto import AutoModel, AutoTokenizer AutoModel.from_pretrained( self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code ) AutoTokenizer.from_pretrained( self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
22
0
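# The binary-search module above re-implements bisect_left / bisect_right by
# hand; the standard library provides the same semantics directly:
import bisect

data = [1, 2, 4, 4, 7]
assert bisect.bisect_left(data, 4) == 2   # first index where 4 could be inserted
assert bisect.bisect_right(data, 4) == 4  # index just past the existing 4s
bisect.insort_left(data, 3)
assert data == [1, 2, 3, 4, 4, 7]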
"""simple docstring""" import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class lowerCAmelCase_ : '''simple docstring''' @staticmethod def _SCREAMING_SNAKE_CASE ( *A_ : str ,**A_ : Tuple ) -> str: pass @is_pipeline_test @require_vision class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @require_torch def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: A = pipeline( model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' ,) A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) A = image_classifier(A_ ,candidate_labels=['a', 'b', 'c'] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(A_ ) ,[ [{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'b'}, {'score': 0.3_33, 'label': 'c'}], [{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'c'}, {'score': 0.3_33, 'label': 'b'}], ] ,) A = image_classifier([image] * 5 ,candidate_labels=['A', 'B', 'C'] ,batch_size=2 ) self.assertEqual( nested_simplify(A_ ) ,[ [ {'score': 0.3_33, 'label': ANY(A_ )}, {'score': 0.3_33, 'label': ANY(A_ )}, {'score': 0.3_33, 'label': ANY(A_ )}, ], [ {'score': 0.3_33, 'label': ANY(A_ )}, {'score': 0.3_33, 'label': ANY(A_ )}, {'score': 0.3_33, 'label': ANY(A_ )}, ], [ {'score': 0.3_33, 'label': ANY(A_ )}, {'score': 0.3_33, 'label': ANY(A_ )}, {'score': 0.3_33, 'label': ANY(A_ )}, ], [ {'score': 0.3_33, 'label': ANY(A_ )}, {'score': 0.3_33, 'label': ANY(A_ )}, {'score': 0.3_33, 'label': ANY(A_ )}, ], [ {'score': 0.3_33, 'label': ANY(A_ )}, {'score': 0.3_33, 'label': ANY(A_ )}, {'score': 0.3_33, 'label': ANY(A_ )}, ], ] ,) @require_tf def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: A = pipeline( model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' ,framework='tf' ) A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) A = image_classifier(A_ ,candidate_labels=['a', 'b', 'c'] ) self.assertEqual( nested_simplify(A_ ) ,[{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'b'}, {'score': 0.3_33, 'label': 'c'}] ,) A = image_classifier([image] * 5 ,candidate_labels=['A', 'B', 'C'] ,batch_size=2 ) self.assertEqual( nested_simplify(A_ ) ,[ [ {'score': 0.3_33, 'label': ANY(A_ )}, {'score': 0.3_33, 'label': ANY(A_ )}, {'score': 0.3_33, 'label': ANY(A_ )}, ], [ {'score': 0.3_33, 'label': ANY(A_ )}, {'score': 0.3_33, 'label': ANY(A_ )}, {'score': 0.3_33, 'label': ANY(A_ )}, ], [ {'score': 0.3_33, 'label': ANY(A_ )}, {'score': 0.3_33, 'label': ANY(A_ )}, {'score': 0.3_33, 'label': ANY(A_ )}, ], [ {'score': 0.3_33, 'label': ANY(A_ )}, {'score': 0.3_33, 'label': ANY(A_ )}, {'score': 0.3_33, 'label': ANY(A_ )}, ], [ {'score': 0.3_33, 'label': ANY(A_ )}, {'score': 0.3_33, 'label': ANY(A_ )}, {'score': 0.3_33, 'label': ANY(A_ )}, ], ] ,) @slow @require_torch def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: A = pipeline( task='zero-shot-image-classification' ,model='openai/clip-vit-base-patch32' ,) # This is an image of 2 cats with remotes and no planes A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) A = image_classifier(A_ ,candidate_labels=['cat', 'plane', 'remote'] ) self.assertEqual( 
nested_simplify(A_ ) ,[ {'score': 0.5_11, 'label': 'remote'}, {'score': 0.4_85, 'label': 'cat'}, {'score': 0.0_04, 'label': 'plane'}, ] ,) A = image_classifier([image] * 5 ,candidate_labels=['cat', 'plane', 'remote'] ,batch_size=2 ) self.assertEqual( nested_simplify(A_ ) ,[ [ {'score': 0.5_11, 'label': 'remote'}, {'score': 0.4_85, 'label': 'cat'}, {'score': 0.0_04, 'label': 'plane'}, ], ] * 5 ,) @slow @require_tf def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: A = pipeline( task='zero-shot-image-classification' ,model='openai/clip-vit-base-patch32' ,framework='tf' ) # This is an image of 2 cats with remotes and no planes A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) A = image_classifier(A_ ,candidate_labels=['cat', 'plane', 'remote'] ) self.assertEqual( nested_simplify(A_ ) ,[ {'score': 0.5_11, 'label': 'remote'}, {'score': 0.4_85, 'label': 'cat'}, {'score': 0.0_04, 'label': 'plane'}, ] ,) A = image_classifier([image] * 5 ,candidate_labels=['cat', 'plane', 'remote'] ,batch_size=2 ) self.assertEqual( nested_simplify(A_ ) ,[ [ {'score': 0.5_11, 'label': 'remote'}, {'score': 0.4_85, 'label': 'cat'}, {'score': 0.0_04, 'label': 'plane'}, ], ] * 5 ,)
720
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = {'''vocab_file''': '''spm_char.model'''} _lowercase = { '''vocab_file''': { '''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''', '''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''', '''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''', } } _lowercase = { '''microsoft/speecht5_asr''': 10_24, '''microsoft/speecht5_tts''': 10_24, '''microsoft/speecht5_vc''': 10_24, } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = VOCAB_FILES_NAMES _lowerCamelCase: List[Any] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase: Tuple = ['''input_ids''', '''attention_mask'''] def __init__( self : List[str] ,A_ : int ,A_ : List[str]="<s>" ,A_ : Optional[Any]="</s>" ,A_ : Optional[Any]="<unk>" ,A_ : str="<pad>" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : List[str] ,) -> None: A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,pad_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,) A = vocab_file A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A_ ) @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: return self.sp_model.get_piece_size() def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : str ) -> Any: A = self.__dict__.copy() A = None return state def __setstate__( self : Optional[int] ,A_ : str ) -> Tuple: A = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): A = {} A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[str]: return self.sp_model.encode(A_ ,out_type=A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any] ) -> Union[str, Any]: return self.sp_model.piece_to_id(A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict ) -> List[Any]: A = self.sp_model.IdToPiece(A_ ) return token def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Optional[Any] ) -> List[str]: A = [] A = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(A_ ) + token A = [] else: current_sub_tokens.append(A_ ) out_string += self.sp_model.decode(A_ ) return out_string.strip() def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : Optional[int]=None ) -> List[int]: if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ ) A = [1] if token_ids_a is None: return ([0] * 
len(A_ )) + suffix_ones return ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(A_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A = os.path.join( A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ ,'wb' ) as fi: A = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,)
22
0
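# A plain-Python restatement of the special-tokens mask built by the SpeechT5
# tokenizer above: every regular token gets 0 and the single appended EOS gets
# 1. The function name and sample tokens are illustrative.
def special_tokens_mask(tokens_a, tokens_b=None):
    suffix_ones = [1]  # marks the appended </s>
    if tokens_b is None:
        return [0] * len(tokens_a) + suffix_ones
    return [0] * len(tokens_a) + [0] * len(tokens_b) + suffix_ones

assert special_tokens_mask(["To", "ensure", "a", "flow"]) == [0, 0, 0, 0, 1]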
"""simple docstring""" def _snake_case ( snake_case__ : list ): if len(snake_case__ ) <= 1: return lst A = 1 while i < len(snake_case__ ): if lst[i - 1] <= lst[i]: i += 1 else: A , A = lst[i], lst[i - 1] i -= 1 if i == 0: A = 1 return lst if __name__ == "__main__": _lowercase = input('''Enter numbers separated by a comma:\n''').strip() _lowercase = [int(item) for item in user_input.split(''',''')] print(gnome_sort(unsorted))
721
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _lowercase = { '''configuration_clip''': [ '''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPConfig''', '''CLIPOnnxConfig''', '''CLIPTextConfig''', '''CLIPVisionConfig''', ], '''processing_clip''': ['''CLIPProcessor'''], '''tokenization_clip''': ['''CLIPTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''CLIPTokenizerFast'''] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''CLIPFeatureExtractor'''] _lowercase = ['''CLIPImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPModel''', '''CLIPPreTrainedModel''', '''CLIPTextModel''', '''CLIPTextModelWithProjection''', '''CLIPVisionModel''', '''CLIPVisionModelWithProjection''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFCLIPModel''', '''TFCLIPPreTrainedModel''', '''TFCLIPTextModel''', '''TFCLIPVisionModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''FlaxCLIPModel''', '''FlaxCLIPPreTrainedModel''', '''FlaxCLIPTextModel''', '''FlaxCLIPTextPreTrainedModel''', '''FlaxCLIPVisionModel''', '''FlaxCLIPVisionPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
0
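# The CLIP __init__ above routes everything through a _LazyModule so heavy
# submodules are only imported on first attribute access. The documented
# standard-library LazyLoader recipe below shows the same idea in miniature,
# independent of transformers; "json" is just a convenient demo target.
import importlib.util
import sys

def lazy_import(name):
    spec = importlib.util.find_spec(name)
    loader = importlib.util.LazyLoader(spec.loader)
    spec.loader = loader
    module = importlib.util.module_from_spec(spec)
    sys.modules[name] = module
    loader.exec_module(module)
    return module

json = lazy_import("json")         # module object exists, body not executed yet
print(json.dumps({"lazy": True}))  # first attribute access triggers the import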
"""simple docstring""" import baseaa def _snake_case ( snake_case__ : str ): return baseaa.baaencode(string.encode('utf-8' ) ) def _snake_case ( snake_case__ : bytes ): return baseaa.baadecode(snake_case__ ).decode('utf-8' ) if __name__ == "__main__": _lowercase = '''Hello World!''' _lowercase = baseaa_encode(test) print(encoded) _lowercase = baseaa_decode(encoded) print(decoded)
700
"""simple docstring""" import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Any ,A_ : Optional[Any] ,A_ : Tuple=13 ,A_ : Optional[Any]=7 ,A_ : Dict=True ,A_ : Optional[Any]=True ,A_ : str=True ,A_ : Union[str, Any]=True ,A_ : Optional[Any]=True ,A_ : Tuple=False ,A_ : Optional[int]=False ,A_ : str=False ,A_ : int=2 ,A_ : Union[str, Any]=99 ,A_ : int=0 ,A_ : Dict=32 ,A_ : List[str]=5 ,A_ : Any=4 ,A_ : str=0.1 ,A_ : Any=0.1 ,A_ : int=512 ,A_ : List[Any]=2 ,A_ : Union[str, Any]=0.02 ,A_ : Optional[Any]=2 ,A_ : List[str]=4 ,A_ : Optional[int]="last" ,A_ : str=True ,A_ : List[str]=None ,A_ : List[Any]=0 ,) -> int: A = parent A = batch_size A = seq_length A = is_training A = use_input_lengths A = use_token_type_ids A = use_labels A = gelu_activation A = sinusoidal_embeddings A = causal A = asm A = n_langs A = vocab_size A = n_special A = hidden_size A = num_hidden_layers A = num_attention_heads A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_sequence_label_size A = initializer_range A = num_labels A = num_choices A = summary_type A = use_proj A = scope A = bos_token_id def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = random_attention_mask([self.batch_size, self.seq_length] ) A = None if self.use_input_lengths: A = ( ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length A = None if self.use_token_type_ids: A = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A = ids_tensor([self.batch_size] ,2 ).float() A = ids_tensor([self.batch_size] ,self.num_choices ) A = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: return XLMConfig( vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : int ,A_ : Dict ,A_ : Optional[Any] ,A_ : Optional[Any] ,A_ : Any 
,A_ : List[str] ,A_ : Optional[int] ,) -> Tuple: A = XLMModel(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,lengths=A_ ,langs=A_ ) A = model(A_ ,langs=A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ,A_ : Union[str, Any] ,A_ : Any ,A_ : Any ,A_ : Any ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : List[str] ,A_ : List[str] ,) -> Union[str, Any]: A = XLMWithLMHeadModel(A_ ) model.to(A_ ) model.eval() A = model(A_ ,token_type_ids=A_ ,labels=A_ ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple ,A_ : Tuple ,A_ : str ,A_ : int ,A_ : str ,A_ : Optional[Any] ,A_ : Any ,A_ : Any ,A_ : Dict ,) -> List[str]: A = XLMForQuestionAnsweringSimple(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model(A_ ,start_positions=A_ ,end_positions=A_ ) A = outputs self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[int] ,A_ : Dict ,A_ : Optional[Any] ,A_ : List[Any] ,A_ : List[str] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : str ,A_ : Any ,) -> Optional[int]: A = XLMForQuestionAnswering(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model( A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,p_mask=A_ ,) A = model( A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,) ((A) , ) = result_with_labels.to_tuple() A = model(A_ ,start_positions=A_ ,end_positions=A_ ) ((A) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape ,() ) self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : Union[str, Any] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : str ,) -> List[Any]: A = XLMForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model(A_ ,labels=A_ ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : str ,A_ : Tuple ,A_ : List[str] ,A_ : Dict ,A_ : Dict ,A_ : Union[str, Any] ,A_ : Dict ,A_ : Any ,) -> Any: A = self.num_labels A = XLMForTokenClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : str ,A_ : int ,A_ : Tuple ,A_ : List[Any] ,A_ : List[str] ,A_ : List[str] ,A_ : Optional[Any] ,A_ : int ,) -> Tuple: A = self.num_choices A = XLMForMultipleChoice(config=A_ ) model.to(A_ ) model.eval() A = 
input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = model( A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) _lowerCamelCase: Dict = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _lowerCamelCase: Optional[Any] = ( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Tuple ,A_ : Any ,A_ : str ) -> Tuple: if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[int] ,A_ : Tuple ,A_ : str=False ) -> Dict: A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": A = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=A_ ) A = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=A_ ) return inputs_dict def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: A = XLMModelTester(self ) A = ConfigTester(self ,config_class=A_ ,emb_dim=37 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : List[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : Union[str, Any] ,A_ : List[Any]=False ,A_ : Tuple=1 ) -> List[Any]: self.assertIsInstance(A_ ,A_ ) self.assertListEqual( [isinstance(A_ ,A_ ) for iter_attentions in attentions] ,[True] * len(A_ ) ) self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(A_ ): # adds PAD dummy token A = min_length + idx + 1 A = min_length + idx + 1 A = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : Dict ,A_ : Optional[Any] ,A_ : Dict ,A_ : Tuple=False ,A_ : Optional[Any]=1 ) -> List[str]: self.assertIsInstance(A_ ,A_ ) self.assertListEqual( [isinstance(A_ ,A_ ) for iter_hidden_states in hidden_states] ,[True] * len(A_ ) ,) self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(A_ ): # adds PAD dummy token A = min_length + idx + 1 A = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(A_ ) ,) pass @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = XLMModel.from_pretrained(A_ ) 
self.assertIsNotNone(A_ ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: A = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' ) model.to(A_ ) A = torch.tensor([[14, 447]] ,dtype=torch.long ,device=A_ ) # the president A = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference A = model.generate(A_ ,do_sample=A_ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,A_ )
22
0
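# A round-trip check of the Base64 helpers above; the expected literal is the
# standard Base64 rendering of the sample string.
import base64

encoded = base64.b64encode("Hello World!".encode("utf-8"))
assert encoded == b"SGVsbG8gV29ybGQh"
assert base64.b64decode(encoded).decode("utf-8") == "Hello World!"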
"""simple docstring""" import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING _lowercase = { '''facebook/mask2former-swin-small-coco-instance''': ( '''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json''' ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } _lowercase = logging.get_logger(__name__) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[Any] = '''mask2former''' _lowerCamelCase: Dict = ['''swin'''] _lowerCamelCase: Union[str, Any] = {'''hidden_size''': '''hidden_dim'''} def __init__( self : Any ,A_ : Optional[Dict] = None ,A_ : int = 256 ,A_ : int = 256 ,A_ : int = 256 ,A_ : int = 1024 ,A_ : str = "relu" ,A_ : int = 6 ,A_ : int = 10 ,A_ : int = 8 ,A_ : float = 0.0 ,A_ : int = 2048 ,A_ : bool = False ,A_ : bool = False ,A_ : int = 4 ,A_ : int = 255 ,A_ : int = 100 ,A_ : float = 0.1 ,A_ : float = 2.0 ,A_ : float = 5.0 ,A_ : float = 5.0 ,A_ : int = 1_2544 ,A_ : float = 3.0 ,A_ : float = 0.75 ,A_ : float = 0.02 ,A_ : float = 1.0 ,A_ : bool = True ,A_ : List[int] = [4, 8, 16, 32] ,A_ : bool = None ,**A_ : Optional[int] ,) -> Optional[int]: if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' ) A = CONFIG_MAPPING['swin']( image_size=224 ,in_channels=3 ,patch_size=4 ,embed_dim=96 ,depths=[2, 2, 18, 2] ,num_heads=[3, 6, 12, 24] ,window_size=7 ,drop_path_rate=0.3 ,use_absolute_embeddings=A_ ,out_features=['stage1', 'stage2', 'stage3', 'stage4'] ,) if isinstance(A_ ,A_ ): A = backbone_config.pop('model_type' ) A = CONFIG_MAPPING[backbone_model_type] A = config_class.from_dict(A_ ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( F'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. ' F'Supported model types: {",".join(self.backbones_supported )}' ) A = backbone_config A = feature_size A = mask_feature_size A = hidden_dim A = encoder_feedforward_dim A = activation_function A = encoder_layers A = decoder_layers A = num_attention_heads A = dropout A = dim_feedforward A = pre_norm A = enforce_input_projection A = common_stride A = ignore_value A = num_queries A = no_object_weight A = class_weight A = mask_weight A = dice_weight A = train_num_points A = oversample_ratio A = importance_sample_ratio A = init_std A = init_xavier_std A = use_auxiliary_loss A = feature_strides A = output_auxiliary_logits A = decoder_layers super().__init__(**A_ ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] ,A_ : PretrainedConfig ,**A_ : Dict ) -> Union[str, Any]: return cls( backbone_config=A_ ,**A_ ,) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict[str, any]: A = copy.deepcopy(self.__dict__ ) A = self.backbone_config.to_dict() A = self.__class__.model_type return output
701
"""simple docstring""" from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput _lowercase = 8 def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int]=BITS ): A = x.device A = (x * 255).int().clamp(0 , 255 ) A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ ) A = rearrange(snake_case__ , 'd -> d 1 1' ) A = rearrange(snake_case__ , 'b c h w -> b c 1 h w' ) A = ((x & mask) != 0).float() A = rearrange(snake_case__ , 'b c d h w -> b (c d) h w' ) A = bits * 2 - 1 return bits def _snake_case ( snake_case__ : Any , snake_case__ : Any=BITS ): A = x.device A = (x > 0).int() A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ , dtype=torch.intaa ) A = rearrange(snake_case__ , 'd -> d 1 1' ) A = rearrange(snake_case__ , 'b (c d) h w -> b c d h w' , d=8 ) A = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' ) return (dec / 255).clamp(0.0 , 1.0 ) def _snake_case ( self : Optional[int] , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : float = 0.0 , snake_case__ : bool = True , snake_case__ : List[str]=None , snake_case__ : bool = True , ): if self.num_inference_steps is None: raise ValueError( 'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) A = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas A = self.alphas_cumprod[timestep] A = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod A = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" A = self.bit_scale if self.config.clip_sample: A = torch.clamp(snake_case__ , -scale , snake_case__ ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) A = self._get_variance(snake_case__ , snake_case__ ) A = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide A = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf A = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf A = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 A = model_output.device if torch.is_tensor(snake_case__ ) else 'cpu' A = torch.randn(model_output.shape , dtype=model_output.dtype , generator=snake_case__ ).to(snake_case__ ) A = self._get_variance(snake_case__ , snake_case__ ) ** 0.5 * eta * noise A = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ ) def _snake_case ( self : Dict , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : Tuple="epsilon" , snake_case__ : List[str]=None , snake_case__ : bool = True , ): A = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: A , A = torch.split(snake_case__ , sample.shape[1] , dim=1 ) else: A = None # 1. compute alphas, betas A = self.alphas_cumprod[t] A = self.alphas_cumprod[t - 1] if t > 0 else self.one A = 1 - alpha_prod_t A = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": A = model_output else: raise ValueError(F'Unsupported prediction_type {prediction_type}.' ) # 3. Clip "predicted x_0" A = self.bit_scale if self.config.clip_sample: A = torch.clamp(snake_case__ , -scale , snake_case__ ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t A = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise A = 0 if t > 0: A = torch.randn( model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=snake_case__ ).to(model_output.device ) A = (self._get_variance(snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise A = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Union[str, Any] ,A_ : UNetaDConditionModel ,A_ : Union[DDIMScheduler, DDPMScheduler] ,A_ : Optional[float] = 1.0 ,) -> Optional[int]: super().__init__() A = bit_scale A = ( ddim_bit_scheduler_step if isinstance(A_ ,A_ ) else ddpm_bit_scheduler_step ) self.register_modules(unet=A_ ,scheduler=A_ ) @torch.no_grad() def __call__( self : Tuple ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 50 ,A_ : Optional[torch.Generator] = None ,A_ : Optional[int] = 1 ,A_ : Optional[str] = "pil" ,A_ : bool = True ,**A_ : Optional[Any] ,) -> Union[Tuple, ImagePipelineOutput]: A = torch.randn( (batch_size, self.unet.config.in_channels, height, width) ,generator=A_ ,) A = decimal_to_bits(A_ ) * self.bit_scale A = latents.to(self.device ) self.scheduler.set_timesteps(A_ ) for t in self.progress_bar(self.scheduler.timesteps ): # predict the noise residual A = self.unet(A_ ,A_ ).sample # compute the previous noisy sample x_t -> x_t-1 A = self.scheduler.step(A_ ,A_ ,A_ ).prev_sample A = bits_to_decimal(A_ ) if output_type == "pil": A = self.numpy_to_pil(A_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=A_ )
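The two helpers at the top of this pipeline implement the analog-bits trick: quantize an image to 8 bit planes in {-1, 1}, diffuse in that space, then threshold and re-weight to recover pixels. A self-contained sketch of the same round-trip with descriptive names (the originals above are obfuscated to _snake_case):

    import torch
    from einops import rearrange, reduce

    BITS = 8

    def decimal_to_bits(x: torch.Tensor, bits: int = BITS) -> torch.Tensor:
        # x in [0, 1], shape (b, c, h, w) -> bit planes in {-1, 1}, shape (b, c*bits, h, w)
        x = (x * 255).int().clamp(0, 255)
        mask = 2 ** torch.arange(bits - 1, -1, -1, device=x.device)
        mask = rearrange(mask, "d -> d 1 1")
        x = rearrange(x, "b c h w -> b c 1 h w")
        bit_planes = ((x & mask) != 0).float()
        return rearrange(bit_planes, "b c d h w -> b (c d) h w") * 2 - 1

    def bits_to_decimal(x: torch.Tensor, bits: int = BITS) -> torch.Tensor:
        # threshold at 0, weight each plane by its power of two, rebuild [0, 1] pixels
        x = (x > 0).int()
        mask = 2 ** torch.arange(bits - 1, -1, -1, device=x.device, dtype=torch.int32)
        mask = rearrange(mask, "d -> d 1 1")
        x = rearrange(x, "b (c d) h w -> b c d h w", d=bits)
        dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
        return (dec / 255).clamp(0.0, 1.0)

    img = torch.rand(1, 3, 8, 8)
    assert torch.allclose(bits_to_decimal(decimal_to_bits(img)), (img * 255).int() / 255)

The round-trip is exact up to the initial 8-bit quantization, which is why the assert compares against the quantized image rather than the raw one.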
22
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase = { '''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LiltForQuestionAnswering''', '''LiltForSequenceClassification''', '''LiltForTokenClassification''', '''LiltModel''', '''LiltPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lilt import ( LILT_PRETRAINED_MODEL_ARCHIVE_LIST, LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, LiltPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
702
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = '''yolos''' def __init__( self : Dict ,A_ : Optional[Any]=768 ,A_ : int=12 ,A_ : List[str]=12 ,A_ : str=3072 ,A_ : Tuple="gelu" ,A_ : Dict=0.0 ,A_ : List[Any]=0.0 ,A_ : Any=0.02 ,A_ : str=1e-12 ,A_ : List[Any]=[512, 864] ,A_ : Union[str, Any]=16 ,A_ : List[str]=3 ,A_ : Optional[int]=True ,A_ : Tuple=100 ,A_ : str=True ,A_ : Optional[Any]=False ,A_ : Any=1 ,A_ : Optional[Any]=5 ,A_ : Optional[Any]=2 ,A_ : Optional[int]=5 ,A_ : List[Any]=2 ,A_ : Union[str, Any]=0.1 ,**A_ : Tuple ,) -> Any: super().__init__(**A_ ) A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = patch_size A = num_channels A = qkv_bias A = num_detection_tokens A = use_mid_position_embeddings A = auxiliary_loss # Hungarian matcher A = class_cost A = bbox_cost A = giou_cost # Loss coefficients A = bbox_loss_coefficient A = giou_loss_coefficient A = eos_coefficient class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Any = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> float: return 1e-4 @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: return 12
22
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _lowercase = { '''configuration_conditional_detr''': [ '''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConditionalDetrConfig''', '''ConditionalDetrOnnxConfig''', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''ConditionalDetrFeatureExtractor'''] _lowercase = ['''ConditionalDetrImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ConditionalDetrForObjectDetection''', '''ConditionalDetrForSegmentation''', '''ConditionalDetrModel''', '''ConditionalDetrPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
703
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Tuple = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=0 ) -> Dict: A = floats_tensor((1, 3, 128, 128) ,rng=random.Random(A_ ) ) A = np.random.RandomState(A_ ) A = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.75, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) A = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=A_ ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) # warmup pass to apply optimizations A = pipe(**self.get_dummy_inputs() ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint 
,provider='CPUExecutionProvider' ) A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str: A = ort.SessionOptions() A = False return options def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) # using the PNDM scheduler by default A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) A = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' ) A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=A_ ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] ) # TODO: lower the tolerance 
after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
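Outside the test harness, the same pipeline runs in a few lines. A sketch assuming the un-obfuscated diffusers class name OnnxStableDiffusionImg2ImgPipeline and the checkpoint/inputs used in the nightly test above:

    import numpy as np
    from diffusers import OnnxStableDiffusionImg2ImgPipeline
    from diffusers.utils import load_image

    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/img2img/sketch-mountains-input.jpg"
    ).resize((768, 512))

    pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", revision="onnx", provider="CPUExecutionProvider"
    )
    out = pipe(
        prompt="A fantasy landscape, trending on artstation",
        image=init_image,
        strength=0.75,
        guidance_scale=7.5,
        num_inference_steps=20,
        generator=np.random.RandomState(0),
        output_type="np",
    )
    print(out.images[0].shape)  # (512, 768, 3)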
22
0
"""simple docstring""" import argparse import shlex import runhouse as rh if __name__ == "__main__": # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access # setup instructions, if using on-demand hardware # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster # Throw an error if user passes both BYO and on-demand cluster args # Otherwise, use default values _lowercase = argparse.ArgumentParser() parser.add_argument('''--user''', type=str, default='''ubuntu''') parser.add_argument('''--host''', type=str, default='''localhost''') parser.add_argument('''--key_path''', type=str, default=None) parser.add_argument('''--instance''', type=str, default='''V100:1''') parser.add_argument('''--provider''', type=str, default='''cheapest''') parser.add_argument('''--use_spot''', type=bool, default=False) parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''') _lowercase , _lowercase = parser.parse_known_args() if args.host != "localhost": if args.instance != "V100:1" or args.provider != "cheapest": raise ValueError('''Cannot specify both BYO and on-demand cluster args''') _lowercase = rh.cluster( name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path} ) else: _lowercase = rh.cluster( name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot ) _lowercase = args.example.rsplit('''/''', 1)[0] # Set up remote environment cluster.install_packages(['''pip:./''']) # Installs transformers from local source # Note transformers is copied into the home directory on the remote machine, so we can install from there cluster.run([F"""pip install -r transformers/examples/{example_dir}/requirements.txt"""]) cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117''']) # Run example. You can bypass the CLI wrapper and paste your own code here. cluster.run([F"""python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"""]) # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI): # from my_script... import train # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard'] # launch_train_gpu = rh.function(fn=train, # system=gpu, # reqs=reqs, # name='train_bert_glue') # # We can pass in arguments just like we would to a function: # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16 # stream_logs=True)
704
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch _lowercase = logging.get_logger(__name__) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Dict = ['''pixel_values'''] def __init__( self : Optional[Any] ,A_ : bool = True ,A_ : Optional[Dict[str, int]] = None ,A_ : PILImageResampling = PILImageResampling.BILINEAR ,A_ : bool = True ,A_ : Dict[str, int] = None ,A_ : bool = True ,A_ : Union[int, float] = 1 / 255 ,A_ : bool = True ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,**A_ : Optional[Any] ,) -> None: super().__init__(**A_ ) A = size if size is not None else {'shortest_edge': 256} A = get_size_dict(A_ ,default_to_square=A_ ) A = crop_size if crop_size is not None else {'height': 224, 'width': 224} A = get_size_dict(A_ ,param_name='crop_size' ) A = do_resize A = size A = resample A = do_center_crop A = crop_size A = do_rescale A = rescale_factor A = do_normalize A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A = image_std if image_std is not None else IMAGENET_STANDARD_STD def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : PILImageResampling = PILImageResampling.BICUBIC ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray: A = get_size_dict(A_ ,default_to_square=A_ ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) A = get_resize_output_image_size(A_ ,size=size['shortest_edge'] ,default_to_square=A_ ) return resize(A_ ,size=A_ ,resample=A_ ,data_format=A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray: A = get_size_dict(A_ ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}' ) return center_crop(A_ ,size=(size['height'], size['width']) ,data_format=A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : float ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : List[str] ) -> np.ndarray: return rescale(A_ ,scale=A_ ,data_format=A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : np.ndarray ,A_ : Union[float, List[float]] ,A_ : Union[float, List[float]] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : Any ,) -> np.ndarray: return normalize(A_ ,mean=A_ ,std=A_ ,data_format=A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : ImageInput ,A_ : Optional[bool] = None ,A_ : Dict[str, int] = None ,A_ : PILImageResampling = None ,A_ : bool = None ,A_ : Dict[str, int] = None ,A_ : Optional[bool] = None ,A_ : Optional[float] = None ,A_ : Optional[bool] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[str, TensorType]] = None ,A_ : Union[str, ChannelDimension] = ChannelDimension.FIRST ,**A_ : Tuple ,) -> List[Any]: A = do_resize if do_resize is not None else self.do_resize A = size if size is not None else self.size A = get_size_dict(A_ ,default_to_square=A_ ) A = resample if resample is not None else self.resample A = do_center_crop if do_center_crop is not None else self.do_center_crop A = crop_size if crop_size is not None else self.crop_size A = get_size_dict(A_ ,param_name='crop_size' ) A = do_rescale if do_rescale is not None else self.do_rescale A = rescale_factor if rescale_factor is not None else self.rescale_factor A = do_normalize if do_normalize is not None else self.do_normalize A = image_mean if image_mean is not None else self.image_mean A = image_std if image_std is not None else self.image_std A = make_list_of_images(A_ ) if not valid_images(A_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
A = [to_numpy_array(A_ ) for image in images] if do_resize: A = [self.resize(image=A_ ,size=A_ ,resample=A_ ) for image in images] if do_center_crop: A = [self.center_crop(image=A_ ,size=A_ ) for image in images] if do_rescale: A = [self.rescale(image=A_ ,scale=A_ ) for image in images] if do_normalize: A = [self.normalize(image=A_ ,mean=A_ ,std=A_ ) for image in images] A = [to_channel_dimension_format(A_ ,A_ ) for image in images] A = {'pixel_values': images} return BatchFeature(data=A_ ,tensor_type=A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : List[Tuple] = None ) -> str: A = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(A_ ) != len(A_ ): raise ValueError( 'Make sure that you pass in as many target sizes as the batch dimension of the logits' ) if is_torch_tensor(A_ ): A = target_sizes.numpy() A = [] for idx in range(len(A_ ) ): A = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='bilinear' ,align_corners=A_ ) A = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(A_ ) else: A = logits.argmax(dim=1 ) A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
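The preprocess method above chains resize -> center-crop -> rescale -> normalize over numpy arrays. A standalone sketch of the rescale/normalize core with hypothetical names (per-channel broadcasting over the last axis, mirroring the do_rescale/do_normalize branches):

    import numpy as np

    IMAGENET_STANDARD_MEAN = [0.5, 0.5, 0.5]
    IMAGENET_STANDARD_STD = [0.5, 0.5, 0.5]

    def rescale_and_normalize(image: np.ndarray, scale: float = 1 / 255) -> np.ndarray:
        # image is (h, w, c) uint8; output is float32, zero-centered per channel
        image = image.astype(np.float32) * scale
        mean = np.array(IMAGENET_STANDARD_MEAN, dtype=np.float32)
        std = np.array(IMAGENET_STANDARD_STD, dtype=np.float32)
        return (image - mean) / std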
22
0
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline else: from .pipeline_kandinsky import KandinskyPipeline from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput from .text_encoder import MultilingualCLIP
705
"""simple docstring""" import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 _lowercase = data_utils.TransfoXLTokenizer _lowercase = data_utils.TransfoXLCorpus _lowercase = data_utils _lowercase = data_utils def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : int ): if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(snake_case__ , 'rb' ) as fp: A = pickle.load(snake_case__ , encoding='latin1' ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) A = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file'] print(F'Save vocabulary to {pytorch_vocab_dump_path}' ) A = corpus.vocab.__dict__ torch.save(snake_case__ , snake_case__ ) A = corpus.__dict__ corpus_dict_no_vocab.pop('vocab' , snake_case__ ) A = pytorch_dump_folder_path + '/' + CORPUS_NAME print(F'Save dataset to {pytorch_dataset_dump_path}' ) torch.save(snake_case__ , snake_case__ ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model A = os.path.abspath(snake_case__ ) A = os.path.abspath(snake_case__ ) print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' ) # Initialise PyTorch model if transfo_xl_config_file == "": A = TransfoXLConfig() else: A = TransfoXLConfig.from_json_file(snake_case__ ) print(F'Building PyTorch model from configuration: {config}' ) A = TransfoXLLMHeadModel(snake_case__ ) A = load_tf_weights_in_transfo_xl(snake_case__ , snake_case__ , snake_case__ ) # Save pytorch-model A = os.path.join(snake_case__ , snake_case__ ) A = os.path.join(snake_case__ , snake_case__ ) print(F'Save PyTorch model to {os.path.abspath(snake_case__ )}' ) torch.save(model.state_dict() , snake_case__ ) print(F'Save configuration file to {os.path.abspath(snake_case__ )}' ) with open(snake_case__ , 'w' , encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the folder to store the PyTorch model or dataset/vocab.''', ) parser.add_argument( '''--tf_checkpoint_path''', default='''''', type=str, help='''An optional path to a TensorFlow checkpoint path to be converted.''', ) parser.add_argument( '''--transfo_xl_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained BERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--transfo_xl_dataset_file''', default='''''', type=str, help='''An optional dataset file to be converted in a vocabulary.''', ) _lowercase = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
22
0
"""simple docstring""" def _snake_case ( snake_case__ : int = 10**9 ): A = 1 A = 2 A = 0 A = 0 A = 0 while perimeter <= max_perimeter: perimeters_sum += perimeter prev_value += 2 * value value += prev_value A = 2 * value + 2 if i % 2 == 0 else 2 * value - 2 i += 1 return perimeters_sum if __name__ == "__main__": print(F"""{solution() = }""")
706
"""simple docstring""" from collections import deque from math import floor from random import random from time import time class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Dict ) -> int: A = {} def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : Optional[Any]=1 ) -> int: if self.graph.get(A_ ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: A = [[w, v]] if not self.graph.get(A_ ): A = [] def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: return list(self.graph ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Dict ) -> Optional[Any]: if self.graph.get(A_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int=-2 ,A_ : Dict=-1 ) -> str: if s == d: return [] A = [] A = [] if s == -2: A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(A_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = ss # check if se have reached the starting point if len(A_ ) == 0: return visited def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Any=-1 ) -> int: if c == -1: A = floor(random() * 1_0000 ) + 10 for i in range(A_ ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): A = floor(random() * c ) + 1 if n != i: self.add_pair(A_ ,A_ ,1 ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Union[str, Any]=-2 ) -> Optional[Any]: A = deque() A = [] if s == -2: A = list(self.graph )[0] d.append(A_ ) visited.append(A_ ) while d: A = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple ) -> Any: A = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Union[str, Any] ) -> str: return len(self.graph[u] ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any]=-2 ) -> Any: A = [] A = [] if s == -2: A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = s A = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = ss # check if se have reached the starting point if len(A_ ) == 0: return sorted_nodes def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = [] A = [] A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = -2 A = [] A = s A = False A = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A = len(A_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 
1: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() A = True if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = False indirect_parents.append(A_ ) A = s A = ss # check if se have reached the starting point if len(A_ ) == 0: return list(A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: A = [] A = [] A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = -2 A = [] A = s A = False A = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A = len(A_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() A = True if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = False indirect_parents.append(A_ ) A = s A = ss # check if se have reached the starting point if len(A_ ) == 0: return False def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple=-2 ,A_ : List[str]=-1 ) -> str: A = time() self.dfs(A_ ,A_ ) A = time() return end - begin def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any]=-2 ) -> Dict: A = time() self.bfs(A_ ) A = time() return end - begin class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] ) -> Tuple: A = {} def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[str]=1 ) -> Dict: # check if the u exists if self.graph.get(A_ ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist A = [[w, v]] # add the other way if self.graph.get(A_ ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist A = [[w, u]] def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : List[str] ) -> List[Any]: if self.graph.get(A_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(A_ ) # the other way round if self.graph.get(A_ ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=-2 ,A_ : List[Any]=-1 ) -> int: if s == d: return [] A = [] A = [] if s == -2: A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(A_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = ss # check if se have reached the starting point if len(A_ ) == 0: return visited def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int]=-1 ) -> List[Any]: if c == -1: A = floor(random() * 1_0000 ) + 10 for i in range(A_ ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): A = floor(random() * c ) + 1 if n != i: self.add_pair(A_ ,A_ ,1 ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict=-2 ) -> List[Any]: A = deque() A = [] if s == -2: A = list(self.graph )[0] d.append(A_ ) visited.append(A_ ) while d: A = d.popleft() if 
len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[Any] ) -> List[Any]: return len(self.graph[u] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: A = [] A = [] A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = -2 A = [] A = s A = False A = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A = len(A_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() A = True if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = False indirect_parents.append(A_ ) A = s A = ss # check if se have reached the starting point if len(A_ ) == 0: return list(A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: A = [] A = [] A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = -2 A = [] A = s A = False A = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A = len(A_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() A = True if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = False indirect_parents.append(A_ ) A = s A = ss # check if se have reached the starting point if len(A_ ) == 0: return False def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: return list(self.graph ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any]=-2 ,A_ : List[str]=-1 ) -> Any: A = time() self.dfs(A_ ,A_ ) A = time() return end - begin def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any]=-2 ) -> Union[str, Any]: A = time() self.bfs(A_ ) A = time() return end - begin
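Note that the directed and undirected graph classes in this sample share one obfuscated name, so the second definition shadows the first if the blob were executed as-is. The core traversal both implement is an iterative DFS over adjacency lists of [weight, neighbor] pairs; a standalone sketch with descriptive names:

    def dfs(graph: dict, start, goal=None):
        # graph maps node -> list of [weight, neighbor]; returns nodes in visit order,
        # stopping early if `goal` is reached (mirrors the obfuscated method above).
        stack, visited = [start], [start]
        while stack:
            node = stack[-1]
            for _, neighbor in graph.get(node, []):
                if neighbor not in visited:
                    if neighbor == goal:
                        return visited + [neighbor]
                    stack.append(neighbor)
                    visited.append(neighbor)
                    break
            else:
                stack.pop()  # every neighbor of the top node is visited: backtrack
        return visited

    g = {0: [[1, 1], [1, 2]], 1: [[1, 3]], 2: [], 3: []}
    print(dfs(g, 0))  # [0, 1, 3, 2]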
22
0
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = {'''vocab_file''': '''spm_char.model'''} _lowercase = { '''vocab_file''': { '''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''', '''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''', '''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''', } } _lowercase = { '''microsoft/speecht5_asr''': 10_24, '''microsoft/speecht5_tts''': 10_24, '''microsoft/speecht5_vc''': 10_24, } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = VOCAB_FILES_NAMES _lowerCamelCase: List[Any] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase: Tuple = ['''input_ids''', '''attention_mask'''] def __init__( self : List[str] ,A_ : int ,A_ : List[str]="<s>" ,A_ : Optional[Any]="</s>" ,A_ : Optional[Any]="<unk>" ,A_ : str="<pad>" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : List[str] ,) -> None: A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,pad_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,) A = vocab_file A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A_ ) @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: return self.sp_model.get_piece_size() def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : str ) -> Any: A = self.__dict__.copy() A = None return state def __setstate__( self : Optional[int] ,A_ : str ) -> Tuple: A = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): A = {} A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[str]: return self.sp_model.encode(A_ ,out_type=A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any] ) -> Union[str, Any]: return self.sp_model.piece_to_id(A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict ) -> List[Any]: A = self.sp_model.IdToPiece(A_ ) return token def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Optional[Any] ) -> List[str]: A = [] A = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(A_ ) + token A = [] else: current_sub_tokens.append(A_ ) out_string += self.sp_model.decode(A_ ) return out_string.strip() def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : Optional[int]=None ) -> List[int]: if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ ) A = [1] if token_ids_a is None: return ([0] * 
len(A_ )) + suffix_ones return ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(A_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A = os.path.join( A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ ,'wb' ) as fi: A = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,)
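In practice this char-level SentencePiece tokenizer is loaded from one of the checkpoints listed in its vocab map; a sketch assuming the un-obfuscated class name SpeechT5Tokenizer:

    from transformers import SpeechT5Tokenizer

    tok = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
    ids = tok("Hello world").input_ids
    print(ids[-1] == tok.eos_token_id)  # True: build_inputs_with_special_tokens appends </s>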
707
"""simple docstring""" from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def _snake_case ( snake_case__ : str = "isbn/0140328726" ): A = olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes if new_olid.count('/' ) != 1: A = F'{olid} is not a valid Open Library olid' raise ValueError(snake_case__ ) return requests.get(F'https://openlibrary.org/{new_olid}.json' ).json() def _snake_case ( snake_case__ : dict ): A = { 'title': 'Title', 'publish_date': 'Publish date', 'authors': 'Authors', 'number_of_pages': 'Number of pages:', 'first_sentence': 'First sentence', 'isbn_10': 'ISBN (10)', 'isbn_13': 'ISBN (13)', } A = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} A = [ get_openlibrary_data(author['key'] )['name'] for author in data['Authors'] ] A = data['First sentence']['value'] for key, value in data.items(): if isinstance(snake_case__ , snake_case__ ): A = ', '.join(snake_case__ ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: _lowercase = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""") continue print(F"""\nSearching Open Library for ISBN: {isbn}...\n""") try: _lowercase = summarize_book(get_openlibrary_data(F"""isbn/{isbn}""")) print('''\n'''.join(F"""{key}: {value}""" for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(F"""Sorry, there are no results for ISBN: {isbn}.""")
22
0
"""simple docstring""" import baseaa import io import json import os from copy import deepcopy from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] ,A_ : Union[str, Any] ) -> Dict: if isinstance(A_ ,A_ ): # Don't modify user's data should they want to reuse it (e.g. in tests), because once we # modified it, it will not be accepted here again, since `auto` values would have been overridden A = deepcopy(A_ ) elif os.path.exists(A_ ): with io.open(A_ ,'r' ,encoding='utf-8' ) as f: A = json.load(A_ ) else: try: A = baseaa.urlsafe_baadecode(A_ ).decode('utf-8' ) A = json.loads(A_ ) except (UnicodeDecodeError, AttributeError, ValueError): raise ValueError( F'Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}' ) A = config self.set_stage_and_offload() def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: # zero stage - this is done as early as possible, before model is created, to allow # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object # during ``zero.Init()`` which needs to know the dtype, and some other hparams. A = self.get_value('zero_optimization.stage' ,-1 ) # offload A = False if self.is_zeroa() or self.is_zeroa(): A = set(['cpu', 'nvme'] ) A = set( [ self.get_value('zero_optimization.offload_optimizer.device' ), self.get_value('zero_optimization.offload_param.device' ), ] ) if len(offload_devices & offload_devices_valid ) > 0: A = True def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : List[Any] ) -> List[str]: A = self.config # find the config node of interest if it exists A = ds_key_long.split('.' ) A = nodes.pop() for node in nodes: A = config.get(A_ ) if config is None: return None, ds_key return config, ds_key def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any] ,A_ : List[str]=None ) -> Optional[int]: A , A = self.find_config_node(A_ ) if config is None: return default return config.get(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[Any] ,A_ : Any=False ) -> Dict: A = self.config # find the config node of interest if it exists A = ds_key_long.split('.' 
) for node in nodes: A = config A = config.get(A_ ) if config is None: if must_exist: raise ValueError(F'Can\'t find {ds_key_long} entry in the config: {self.config}' ) else: return # if found remove it if parent_config is not None: parent_config.pop(A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : int ) -> Tuple: A = self.get_value(A_ ) return False if value is None else bool(A_ ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ) -> Union[str, Any]: A = self.get_value(A_ ) return False if value is None else not bool(A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> str: return self._stage == 2 def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: return self._stage == 3 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]: return self._offload class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Dict ,A_ : str ) -> Union[str, Any]: A = engine def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Union[str, Any] ,**A_ : int ) -> Tuple: # runs backpropagation and handles mixed precision self.engine.backward(A_ ,**A_ ) # Deepspeed's `engine.step` performs the following operations: # - gradient accumulation check # - gradient clipping # - optimizer step # - zero grad # - checking overflow # - lr_scheduler step (only if engine.lr_scheduler is not None) self.engine.step() # and this plugin overrides the above calls with no-ops when Accelerate runs under # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple # training loop that works transparently under many training regimes. class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : List[Any] ,A_ : str ) -> List[Any]: super().__init__(A_ ,device_placement=A_ ,scaler=A_ ) A = hasattr(self.optimizer ,'overflow' ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Dict=None ) -> List[Any]: pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int: pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: if self.__has_overflow__: return self.optimizer.overflow return False class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : List[str] ,A_ : int ,A_ : int ) -> Dict: super().__init__(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed class lowerCAmelCase_ : '''simple docstring''' def __init__( self : str ,A_ : str ,A_ : List[str]=0.0_01 ,A_ : Tuple=0 ,**A_ : Tuple ) -> str: A = params A = lr A = weight_decay A = kwargs class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[int] ,A_ : Dict ,A_ : int=None ,A_ : List[Any]=0 ,**A_ : Optional[int] ) -> Any: A = optimizer A = total_num_steps A = warmup_num_steps A = kwargs
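The find/get pair in this config wrapper walks dotted keys such as 'zero_optimization.offload_param.device' through the nested DeepSpeed JSON. A standalone sketch of that lookup with hypothetical names mirroring the obfuscated methods:

    def find_config_node(config: dict, ds_key_long: str):
        # Split 'a.b.c' into the parent path and the leaf key, then descend node by node.
        *nodes, leaf = ds_key_long.split(".")
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, leaf
        return config, leaf

    def get_value(config: dict, ds_key_long: str, default=None):
        node, leaf = find_config_node(config, ds_key_long)
        return default if node is None else node.get(leaf, default)

    ds = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
    print(get_value(ds, "zero_optimization.offload_param.device"))           # cpu
    print(get_value(ds, "zero_optimization.offload_optimizer.device", "none"))  # none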
708
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) _lowercase = { '''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''], '''tokenization_perceiver''': ['''PerceiverTokenizer'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''PerceiverFeatureExtractor'''] _lowercase = ['''PerceiverImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PerceiverForImageClassificationConvProcessing''', '''PerceiverForImageClassificationFourier''', '''PerceiverForImageClassificationLearned''', '''PerceiverForMaskedLM''', '''PerceiverForMultimodalAutoencoding''', '''PerceiverForOpticalFlow''', '''PerceiverForSequenceClassification''', '''PerceiverLayer''', '''PerceiverModel''', '''PerceiverPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
0
import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() _lowercase = logging.get_logger('''transformers.models.speecht5''') _lowercase = { '''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''', '''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''', '''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''', '''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''', } _lowercase = { '''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''', '''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''', } _lowercase = { '''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''', '''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''', '''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''', '''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''', '''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''', } _lowercase = { '''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''', '''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''', '''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''', '''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''', '''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''', '''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''', '''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''', '''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''', '''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''', '''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''', '''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''', '''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''', } _lowercase = { '''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''', } _lowercase = { '''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''', } _lowercase = { '''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''', '''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''', '''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''', '''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''', '''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''', '''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''', '''encoder.layers.*.fc2''': 
'''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''', '''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''', '''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''', } _lowercase = { '''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''', '''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''', '''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''', '''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''', '''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''', '''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''', '''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''', '''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''', '''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''', '''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''', '''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''', '''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''', '''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''', } _lowercase = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } _lowercase = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } _lowercase = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } _lowercase = [] _lowercase = [ '''encoder.version''', '''encoder.layers.*.norm_k.weight''', '''encoder.layers.*.norm_k.bias''', '''decoder.version''', '''decoder.layers.*.norm_k.weight''', '''decoder.layers.*.norm_k.bias''', '''decoder.pos_emb.pe_k''', '''speech_encoder_prenet.embed_positions._float_tensor''', '''text_decoder_prenet.embed_positions._float_tensor''', ] _lowercase = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''speech_decoder_prenet.*''', '''speech_decoder_postnet.*''', ] _lowercase = IGNORE_KEYS + [ '''encoder.proj''', '''speech_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] _lowercase = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] def _snake_case ( snake_case__ : str , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : Any ): for attribute in key.split('.' ): A = getattr(snake_case__ , snake_case__ ) if weight_type is not None: A = getattr(snake_case__ , snake_case__ ).shape else: A = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be' F' {value.shape} for {full_name}' ) if weight_type == "weight": A = value elif weight_type == "weight_g": A = value elif weight_type == "weight_v": A = value elif weight_type == "bias": A = value elif weight_type == "running_mean": A = value elif weight_type == "running_var": A = value elif weight_type == "num_batches_tracked": A = value else: A = value logger.info(F'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' ) def _snake_case ( snake_case__ : int , snake_case__ : str ): for key in ignore_keys: if key.endswith('.*' ): if name.startswith(key[:-1] ): return True elif ".*." in key: A , A = key.split('.*.' ) if prefix in name and suffix in name: return True elif key in name: return True return False def _snake_case ( snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Union[str, Any] ): A = [] if task == "s2t": A = hf_model.speechta.encoder.prenet.feature_encoder A = MAPPING_S2T A = IGNORE_KEYS_S2T elif task == "t2s": A = None A = MAPPING_T2S A = IGNORE_KEYS_T2S elif task == "s2s": A = hf_model.speechta.encoder.prenet.feature_encoder A = MAPPING_S2S A = IGNORE_KEYS_S2S else: raise ValueError(F'Unsupported task: {task}' ) for name, value in fairseq_dict.items(): if should_ignore(snake_case__ , snake_case__ ): logger.info(F'{name} was ignored' ) continue A = False if "conv_layers" in name: load_conv_layer( snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == 'group' , ) A = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: A , A = key.split('.*.' ) if prefix in name and suffix in name: A = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: A = True if "*" in mapped_key: A = name.split(snake_case__ )[0].split('.' )[-2] A = mapped_key.replace('*' , snake_case__ ) if "weight_g" in name: A = 'weight_g' elif "weight_v" in name: A = 'weight_v' elif "bias" in name: A = 'bias' elif "weight" in name: A = 'weight' elif "running_mean" in name: A = 'running_mean' elif "running_var" in name: A = 'running_var' elif "num_batches_tracked" in name: A = 'num_batches_tracked' else: A = None set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) continue if not is_used: unused_weights.append(snake_case__ ) logger.warning(F'Unused weights: {unused_weights}' ) def _snake_case ( snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : int ): A = full_name.split('conv_layers.' )[-1] A = name.split('.' ) A = int(items[0] ) A = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) A = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) A = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' 
) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' ) A = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' ) A = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) else: unused_weights.append(snake_case__ ) @torch.no_grad() def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : int=None , snake_case__ : Tuple=None , snake_case__ : int=None , ): if config_path is not None: A = SpeechTaConfig.from_pretrained(snake_case__ ) else: A = SpeechTaConfig() if task == "s2t": A = config.max_text_positions A = SpeechTaForSpeechToText(snake_case__ ) elif task == "t2s": A = 1876 A = 600 A = config.max_speech_positions A = SpeechTaForTextToSpeech(snake_case__ ) elif task == "s2s": A = 1876 A = config.max_speech_positions A = SpeechTaForSpeechToSpeech(snake_case__ ) else: raise ValueError(F'Unknown task name: {task}' ) if vocab_path: A = SpeechTaTokenizer(snake_case__ , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it A = AddedToken('<mask>' , lstrip=snake_case__ , rstrip=snake_case__ ) A = mask_token tokenizer.add_special_tokens({'mask_token': mask_token} ) tokenizer.add_tokens(['<ctc_blank>'] ) A = SpeechTaFeatureExtractor() A = SpeechTaProcessor(tokenizer=snake_case__ , feature_extractor=snake_case__ ) processor.save_pretrained(snake_case__ ) A = torch.load(snake_case__ ) recursively_load_weights(fairseq_checkpoint['model'] , snake_case__ , snake_case__ ) model.save_pretrained(snake_case__ ) if repo_id: print('Pushing to the hub...' ) processor.push_to_hub(snake_case__ ) model.push_to_hub(snake_case__ ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument( '''--task''', default='''s2t''', type=str, help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) _lowercase = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
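For orientation, a minimal sketch of driving the SpeechT5 converter directly instead of through argparse; the positional order mirrors the call in the __main__ block above, and every path below is a placeholder, not taken from the source.

# Hedged sketch; assumes the entry point is importable under the name used in __main__ above.
convert_speechta_checkpoint(
    't2s',                 # task: 's2t', 't2s', or 's2s'
    './speecht5_tts.pt',   # placeholder fairseq checkpoint path
    './speecht5_tts_hf',   # placeholder output folder for model + processor
    None,                  # config_path: fall back to the default SpeechTaConfig
    './spm_char.model',    # placeholder SentencePiece model for the tokenizer
    None,                  # repo_id: skip pushing to the hub
)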
709
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def _snake_case ( snake_case__ : int ): A = SwinvaConfig() A = swinva_name.split('_' ) A = name_split[1] if "to" in name_split[3]: A = int(name_split[3][-3:] ) else: A = int(name_split[3] ) if "to" in name_split[2]: A = int(name_split[2][-2:] ) else: A = int(name_split[2][6:] ) if model_size == "tiny": A = 96 A = (2, 2, 6, 2) A = (3, 6, 12, 24) elif model_size == "small": A = 96 A = (2, 2, 18, 2) A = (3, 6, 12, 24) elif model_size == "base": A = 128 A = (2, 2, 18, 2) A = (4, 8, 16, 32) else: A = 192 A = (2, 2, 18, 2) A = (6, 12, 24, 48) if "to" in swinva_name: A = (12, 12, 12, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): A = 2_1841 A = 'huggingface/label-files' A = 'imagenet-22k-id2label.json' A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) ) A = {int(snake_case__ ): v for k, v in idalabel.items()} A = idalabel A = {v: k for k, v in idalabel.items()} else: A = 1000 A = 'huggingface/label-files' A = 'imagenet-1k-id2label.json' A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) ) A = {int(snake_case__ ): v for k, v in idalabel.items()} A = idalabel A = {v: k for k, v in idalabel.items()} A = img_size A = num_classes A = embed_dim A = depths A = num_heads A = window_size return config def _snake_case ( snake_case__ : List[Any] ): if "patch_embed.proj" in name: A = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: A = name.replace('patch_embed.norm' , 'embeddings.norm' ) if "layers" in name: A = 'encoder.' + name if "attn.proj" in name: A = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: A = name.replace('attn' , 'attention.self' ) if "norm1" in name: A = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: A = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: A = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: A = name.replace('mlp.fc2' , 'output.dense' ) if "q_bias" in name: A = name.replace('q_bias' , 'query.bias' ) if "k_bias" in name: A = name.replace('k_bias' , 'key.bias' ) if "v_bias" in name: A = name.replace('v_bias' , 'value.bias' ) if "cpb_mlp" in name: A = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' ) if name == "norm.weight": A = 'layernorm.weight' if name == "norm.bias": A = 'layernorm.bias' if "head" in name: A = name.replace('head' , 'classifier' ) else: A = 'swinv2.' + name return name def _snake_case ( snake_case__ : List[Any] , snake_case__ : List[Any] ): for key in orig_state_dict.copy().keys(): A = orig_state_dict.pop(snake_case__ ) if "mask" in key: continue elif "qkv" in key: A = key.split('.' 
) A = int(key_split[1] ) A = int(key_split[3] ) A = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: A = val[:dim, :] A = val[dim : dim * 2, :] A = val[-dim:, :] else: A = val[:dim] A = val[ dim : dim * 2 ] A = val[-dim:] else: A = val return orig_state_dict def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Tuple ): A = timm.create_model(snake_case__ , pretrained=snake_case__ ) timm_model.eval() A = get_swinva_config(snake_case__ ) A = SwinvaForImageClassification(snake_case__ ) model.eval() A = convert_state_dict(timm_model.state_dict() , snake_case__ ) model.load_state_dict(snake_case__ ) A = 'http://images.cocodataset.org/val2017/000000039769.jpg' A = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swinva_name.replace('_' , '-' ) ) ) A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) A = image_processor(images=snake_case__ , return_tensors='pt' ) A = timm_model(inputs['pixel_values'] ) A = model(**snake_case__ ).logits assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) print(F'Saving model {swinva_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case__ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(snake_case__ ) model.push_to_hub( repo_path_or_name=Path(snake_case__ , snake_case__ ) , organization='nandwalritik' , commit_message='Add model' , ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swinv2_name''', default='''swinv2_tiny_patch4_window8_256''', type=str, help='''Name of the Swinv2 timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) _lowercase = parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
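The `qkv` branch in `convert_state_dict` above slices timm's fused projection into separate query/key/value tensors; a self-contained sketch of that split follows, with an illustrative size (the value 96 is not from the source).

import torch

dim = 96                           # illustrative all_head_size
fused = torch.randn(3 * dim, dim)  # timm stacks q, k, v row-wise in one matrix
query = fused[:dim, :]
key = fused[dim : dim * 2, :]
value = fused[-dim:, :]
assert query.shape == key.shape == value.shape == (dim, dim)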
22
0
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def _snake_case ( snake_case__ : List[Any] ): A = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2] A = True if 'large' in model_name or 'huge' in model_name else False A = True if 'large' in model_name or 'huge' in model_name else False A = True if 'large' in model_name or 'huge' in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: A = [3, 3, 3, 3] A = [5, 5, 5, 5] elif "fl4" in model_name: A = [4, 4, 4, 4] A = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: A = [3, 3, 3, 3] if "lrf" in model_name: A = [3, 3, 3, 3] else: A = [2, 2, 2, 2] if "tiny" in model_name: A = 96 elif "small" in model_name: A = 96 elif "base" in model_name: A = 128 elif "large" in model_name: A = 192 elif "xlarge" in model_name: A = 256 elif "huge" in model_name: A = 352 # set label information A = 'huggingface/label-files' if "large" in model_name or "huge" in model_name: A = 'imagenet-22k-id2label.json' else: A = 'imagenet-1k-id2label.json' A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) ) A = {int(snake_case__ ): v for k, v in idalabel.items()} A = {v: k for k, v in idalabel.items()} A = FocalNetConfig( embed_dim=snake_case__ , depths=snake_case__ , focal_levels=snake_case__ , focal_windows=snake_case__ , use_conv_embed=snake_case__ , idalabel=snake_case__ , labelaid=snake_case__ , use_post_layernorm=snake_case__ , use_layerscale=snake_case__ , ) return config def _snake_case ( snake_case__ : int ): if "patch_embed.proj" in name: A = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: A = name.replace('patch_embed.norm' , 'embeddings.norm' ) if "layers" in name: A = 'encoder.' + name if "encoder.layers" in name: A = name.replace('encoder.layers' , 'encoder.stages' ) if "downsample.proj" in name: A = name.replace('downsample.proj' , 'downsample.projection' ) if "blocks" in name: A = name.replace('blocks' , 'layers' ) if "modulation.f.weight" in name or "modulation.f.bias" in name: A = name.replace('modulation.f' , 'modulation.projection_in' ) if "modulation.h.weight" in name or "modulation.h.bias" in name: A = name.replace('modulation.h' , 'modulation.projection_context' ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: A = name.replace('modulation.proj' , 'modulation.projection_out' ) if name == "norm.weight": A = 'layernorm.weight' if name == "norm.bias": A = 'layernorm.bias' if "head" in name: A = name.replace('head' , 'classifier' ) else: A = 'focalnet.' 
+ name return name def _snake_case ( snake_case__ : str , snake_case__ : Any , snake_case__ : Dict=False ): # fmt: off A = { 'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth', 'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth', 'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth', 'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth', 'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth', 'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth', 'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth', 'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth', 'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth', 'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth', } # fmt: on A = model_name_to_url[model_name] print('Checkpoint URL: ' , snake_case__ ) A = torch.hub.load_state_dict_from_url(snake_case__ , map_location='cpu' )['model'] # rename keys for key in state_dict.copy().keys(): A = state_dict.pop(snake_case__ ) A = val A = get_focalnet_config(snake_case__ ) A = FocalNetForImageClassification(snake_case__ ) model.eval() # load state dict model.load_state_dict(snake_case__ ) # verify conversion A = 'http://images.cocodataset.org/val2017/000000039769.jpg' A = BitImageProcessor( do_resize=snake_case__ , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=snake_case__ , crop_size=224 , do_normalize=snake_case__ , image_mean=snake_case__ , image_std=snake_case__ , ) A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) A = processor(images=snake_case__ , return_tensors='pt' ) A = transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) A = image_transforms(snake_case__ ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , snake_case__ , atol=1e-4 ) A = model(**snake_case__ ) A = outputs.logits.argmax(-1 ).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx] ) print('First values of logits:' , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": A = torch.tensor([0.2166, -0.4368, 0.2191] ) elif model_name == "focalnet-tiny-lrf": A = torch.tensor([1.1669, 0.0125, -0.1695] ) elif model_name == "focalnet-small": A = torch.tensor([0.4917, -0.0430, 0.1341] ) elif model_name == "focalnet-small-lrf": A = torch.tensor([-0.2588, -0.5342, -0.2331] ) elif model_name == "focalnet-base": A = torch.tensor([-0.1655, -0.4090, -0.1730] ) elif model_name == "focalnet-base-lrf": A = torch.tensor([0.5306, -0.0483, -0.3928] ) assert torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 ) print('Looks ok!' 
) if pytorch_dump_folder_path is not None: print(F'Saving model and processor of {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case__ ) processor.save_pretrained(snake_case__ ) if push_to_hub: print(F'Pushing model and processor of {model_name} to the hub...' ) model.push_to_hub(F'{model_name}' ) processor.push_to_hub(F'{model_name}' ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''focalnet-tiny''', type=str, help='''Name of the FocalNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub.''', ) _lowercase = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
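A hedged invocation sketch for the FocalNet converter, mirroring the argparse wiring above; the output directory is a placeholder.

convert_focalnet_checkpoint(
    'focalnet-tiny',       # must be a key of model_name_to_url above
    './focalnet-tiny-hf',  # placeholder output directory
    False,                 # push_to_hub
)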
710
"""simple docstring""" from math import pi, sqrt def _snake_case ( snake_case__ : float ): if num <= 0: raise ValueError('math domain error' ) if num > 171.5: raise OverflowError('math range error' ) elif num - int(snake_case__ ) not in (0, 0.5): raise NotImplementedError('num must be an integer or a half-integer' ) elif num == 0.5: return sqrt(snake_case__ ) else: return 1.0 if num == 1 else (num - 1) * gamma(num - 1 ) def _snake_case ( ): assert gamma(0.5 ) == sqrt(snake_case__ ) assert gamma(1 ) == 1.0 assert gamma(2 ) == 1.0 if __name__ == "__main__": from doctest import testmod testmod() _lowercase = 1.0 while num: _lowercase = float(input('''Gamma of: ''')) print(F"""gamma({num}) = {gamma(num)}""") print('''\nEnter 0 to exit...''')
22
0
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) _lowercase = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''adapter_layer''': '''encoder.layers.*.adapter_layer''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', '''pooling_layer.linear''': '''projector''', '''pooling_layer.projection''': '''classifier''', } _lowercase = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', '''projector''', '''classifier''', ] def _snake_case ( snake_case__ : List[Any] ): A = {} with open(snake_case__ , 'r' ) as file: for line_number, line in enumerate(snake_case__ ): A = line.strip() if line: A = line.split() A = line_number A = words[0] A = value return result def _snake_case ( snake_case__ : int , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : Dict ): for attribute in key.split('.' ): A = getattr(snake_case__ , snake_case__ ) A = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(snake_case__ ): A = PARAM_MAPPING[full_name.split('.' )[-1]] A = 'param' if weight_type is not None and weight_type != "param": A = getattr(snake_case__ , snake_case__ ).shape elif weight_type is not None and weight_type == "param": A = hf_pointer for attribute in hf_param_name.split('.' ): A = getattr(snake_case__ , snake_case__ ) A = shape_pointer.shape # let's reduce dimension A = value[0] else: A = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' F' {value.shape} for {full_name}' ) if weight_type == "weight": A = value elif weight_type == "weight_g": A = value elif weight_type == "weight_v": A = value elif weight_type == "bias": A = value elif weight_type == "param": for attribute in hf_param_name.split('.' ): A = getattr(snake_case__ , snake_case__ ) A = value else: A = value logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' 
) def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : List[str] ): A = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(snake_case__ ): A = PARAM_MAPPING[full_name.split('.' )[-1]] A = 'param' if weight_type is not None and weight_type != "param": A = '.'.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": A = '.'.join([key, hf_param_name] ) else: A = key A = value if 'lm_head' in full_key else value[0] _lowercase = { '''W_a''': '''linear_1.weight''', '''W_b''': '''linear_2.weight''', '''b_a''': '''linear_1.bias''', '''b_b''': '''linear_2.bias''', '''ln_W''': '''norm.weight''', '''ln_b''': '''norm.bias''', } def _snake_case ( snake_case__ : List[str] , snake_case__ : Any , snake_case__ : List[Any]=None , snake_case__ : int=None ): A = False for key, mapped_key in MAPPING.items(): A = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: A = True if "*" in mapped_key: A = name.split(snake_case__ )[0].split('.' )[-2] A = mapped_key.replace('*' , snake_case__ ) if "weight_g" in name: A = 'weight_g' elif "weight_v" in name: A = 'weight_v' elif "bias" in name: A = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj A = 'weight' else: A = None if hf_dict is not None: rename_dict(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) else: set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) return is_used return is_used def _snake_case ( snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : int ): A = [] A = fairseq_model.state_dict() A = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): A = False if "conv_layers" in name: load_conv_layer( snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == 'group' , ) A = True else: A = load_wavaveca_layer(snake_case__ , snake_case__ , snake_case__ ) if not is_used: unused_weights.append(snake_case__ ) logger.warning(F'Unused weights: {unused_weights}' ) def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : Any , snake_case__ : List[str] ): A = full_name.split('conv_layers.' )[-1] A = name.split('.' ) A = int(items[0] ) A = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) A = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) A = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' 
) A = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' ) A = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) else: unused_weights.append(snake_case__ ) @torch.no_grad() def _snake_case ( snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : List[str]=None , snake_case__ : Tuple=None , snake_case__ : Tuple=True , snake_case__ : Optional[Any]=False ): if config_path is not None: A = WavaVecaConfig.from_pretrained(snake_case__ ) else: A = WavaVecaConfig() if is_seq_class: A = read_txt_into_dict(snake_case__ ) A = idalabel A = WavaVecaForSequenceClassification(snake_case__ ) A = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , ) feature_extractor.save_pretrained(snake_case__ ) elif is_finetuned: if dict_path: A = Dictionary.load(snake_case__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq A = target_dict.pad_index A = target_dict.bos_index A = target_dict.eos_index A = len(target_dict.symbols ) A = os.path.join(snake_case__ , 'vocab.json' ) if not os.path.isdir(snake_case__ ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(snake_case__ ) ) return os.makedirs(snake_case__ , exist_ok=snake_case__ ) A = target_dict.indices # fairseq has the <pad> and <s> switched A = 0 A = 1 with open(snake_case__ , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(snake_case__ , snake_case__ ) A = WavaVecaCTCTokenizer( snake_case__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=snake_case__ , ) A = True if config.feat_extract_norm == 'layer' else False A = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , ) A = WavaVecaProcessor(feature_extractor=snake_case__ , tokenizer=snake_case__ ) processor.save_pretrained(snake_case__ ) A = WavaVecaForCTC(snake_case__ ) else: A = WavaVecaForPreTraining(snake_case__ ) if is_finetuned or is_seq_class: A , A , A = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: A = argparse.Namespace(task='audio_pretraining' ) A = fairseq.tasks.setup_task(snake_case__ ) A , A , A = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=snake_case__ ) A = model[0].eval() recursively_load_weights(snake_case__ , snake_case__ , not is_finetuned ) hf_wavavec.save_pretrained(snake_case__ ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to 
convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) parser.add_argument( '''--is_seq_class''', action='''store_true''', help='''Whether the model to convert is a fine-tuned sequence classification model or not''', ) _lowercase = parser.parse_args() _lowercase = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
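A hedged sketch of the fine-tuned CTC path, following the positional order of the __main__ call above; all paths are placeholders, not from the source.

convert_wavaveca_checkpoint(
    './wav2vec2_960h.pt',   # placeholder fairseq checkpoint
    './wav2vec2-960h-hf',   # placeholder output directory
    None,                   # config_path: use the default WavaVecaConfig
    './dict.ltr.txt',       # placeholder fairseq dictionary for the CTC vocab
    True,                   # is_finetuned: builds the tokenizer and WavaVecaForCTC
    False,                  # is_seq_class
)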
711
"""simple docstring""" from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: torch.FloatTensor class lowerCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : List[str] ,A_ : Dict=3 ,A_ : int=3 ,A_ : str=("DownEncoderBlock2D",) ,A_ : Dict=(64,) ,A_ : str=2 ,A_ : Union[str, Any]=32 ,A_ : Optional[int]="silu" ,A_ : str=True ,) -> Union[str, Any]: super().__init__() A = layers_per_block A = torch.nn.Convad( A_ ,block_out_channels[0] ,kernel_size=3 ,stride=1 ,padding=1 ,) A = None A = nn.ModuleList([] ) # down A = block_out_channels[0] for i, down_block_type in enumerate(A_ ): A = output_channel A = block_out_channels[i] A = i == len(A_ ) - 1 A = get_down_block( A_ ,num_layers=self.layers_per_block ,in_channels=A_ ,out_channels=A_ ,add_downsample=not is_final_block ,resnet_eps=1e-6 ,downsample_padding=0 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,) self.down_blocks.append(A_ ) # mid A = UNetMidBlockaD( in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,) # out A = nn.GroupNorm(num_channels=block_out_channels[-1] ,num_groups=A_ ,eps=1e-6 ) A = nn.SiLU() A = 2 * out_channels if double_z else out_channels A = nn.Convad(block_out_channels[-1] ,A_ ,3 ,padding=1 ) A = False def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]: A = x A = self.conv_in(A_ ) if self.training and self.gradient_checkpointing: def create_custom_forward(A_ : Dict ): def custom_forward(*A_ : Tuple ): return module(*A_ ) return custom_forward # down if is_torch_version('>=' ,'1.11.0' ): for down_block in self.down_blocks: A = torch.utils.checkpoint.checkpoint( create_custom_forward(A_ ) ,A_ ,use_reentrant=A_ ) # middle A = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) ,A_ ,use_reentrant=A_ ) else: for down_block in self.down_blocks: A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ ) # middle A = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) ,A_ ) else: # down for down_block in self.down_blocks: A = down_block(A_ ) # middle A = self.mid_block(A_ ) # post-process A = self.conv_norm_out(A_ ) A = self.conv_act(A_ ) A = self.conv_out(A_ ) return sample class lowerCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : List[Any] ,A_ : Optional[Any]=3 ,A_ : Optional[int]=3 ,A_ : str=("UpDecoderBlock2D",) ,A_ : Any=(64,) ,A_ : Optional[int]=2 ,A_ : Optional[int]=32 ,A_ : Tuple="silu" ,A_ : Optional[int]="group" ,) -> Any: super().__init__() A = layers_per_block A = nn.Convad( A_ ,block_out_channels[-1] ,kernel_size=3 ,stride=1 ,padding=1 ,) A = None A = nn.ModuleList([] ) A = in_channels if norm_type == 'spatial' else None # mid A = UNetMidBlockaD( in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' if norm_type == 'group' else norm_type ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,) # up A = list(reversed(A_ ) ) A = reversed_block_out_channels[0] for i, up_block_type 
in enumerate(A_ ): A = output_channel A = reversed_block_out_channels[i] A = i == len(A_ ) - 1 A = get_up_block( A_ ,num_layers=self.layers_per_block + 1 ,in_channels=A_ ,out_channels=A_ ,prev_output_channel=A_ ,add_upsample=not is_final_block ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,resnet_time_scale_shift=A_ ,) self.up_blocks.append(A_ ) A = output_channel # out if norm_type == "spatial": A = SpatialNorm(block_out_channels[0] ,A_ ) else: A = nn.GroupNorm(num_channels=block_out_channels[0] ,num_groups=A_ ,eps=1e-6 ) A = nn.SiLU() A = nn.Convad(block_out_channels[0] ,A_ ,3 ,padding=1 ) A = False def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Union[str, Any]=None ) -> Any: A = z A = self.conv_in(A_ ) A = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(A_ : List[Any] ): def custom_forward(*A_ : Tuple ): return module(*A_ ) return custom_forward if is_torch_version('>=' ,'1.11.0' ): # middle A = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) ,A_ ,A_ ,use_reentrant=A_ ) A = sample.to(A_ ) # up for up_block in self.up_blocks: A = torch.utils.checkpoint.checkpoint( create_custom_forward(A_ ) ,A_ ,A_ ,use_reentrant=A_ ) else: # middle A = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) ,A_ ,A_ ) A = sample.to(A_ ) # up for up_block in self.up_blocks: A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ ,A_ ) else: # middle A = self.mid_block(A_ ,A_ ) A = sample.to(A_ ) # up for up_block in self.up_blocks: A = up_block(A_ ,A_ ) # post-process if latent_embeds is None: A = self.conv_norm_out(A_ ) else: A = self.conv_norm_out(A_ ,A_ ) A = self.conv_act(A_ ) A = self.conv_out(A_ ) return sample class lowerCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[int] ,A_ : Optional[int] ,A_ : Any ,A_ : str ,A_ : Dict=None ,A_ : List[Any]="random" ,A_ : Optional[int]=False ,A_ : str=True ) -> List[str]: super().__init__() A = n_e A = vq_embed_dim A = beta A = legacy A = nn.Embedding(self.n_e ,self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e ,1.0 / self.n_e ) A = remap if self.remap is not None: self.register_buffer('used' ,torch.tensor(np.load(self.remap ) ) ) A = self.used.shape[0] A = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": A = self.re_embed A = self.re_embed + 1 print( F'Remapping {self.n_e} indices to {self.re_embed} indices. ' F'Using {self.unknown_index} for unknown indices.' 
) else: A = n_e A = sane_index_shape def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ) -> Any: A = inds.shape assert len(A_ ) > 1 A = inds.reshape(ishape[0] ,-1 ) A = self.used.to(A_ ) A = (inds[:, :, None] == used[None, None, ...]).long() A = match.argmax(-1 ) A = match.sum(2 ) < 1 if self.unknown_index == "random": A = torch.randint(0 ,self.re_embed ,size=new[unknown].shape ).to(device=new.device ) else: A = self.unknown_index return new.reshape(A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ) -> List[Any]: A = inds.shape assert len(A_ ) > 1 A = inds.reshape(ishape[0] ,-1 ) A = self.used.to(A_ ) if self.re_embed > self.used.shape[0]: # extra token A = 0 # simply set to zero A = torch.gather(used[None, :][inds.shape[0] * [0], :] ,1 ,A_ ) return back.reshape(A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[Any] ) -> str: # reshape z -> (batch, height, width, channel) and flatten A = z.permute(0 ,2 ,3 ,1 ).contiguous() A = z.view(-1 ,self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z A = torch.argmin(torch.cdist(A_ ,self.embedding.weight ) ,dim=1 ) A = self.embedding(A_ ).view(z.shape ) A = None A = None # compute loss for embedding if not self.legacy: A = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: A = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients A = z + (z_q - z).detach() # reshape back to match original input shape A = z_q.permute(0 ,3 ,1 ,2 ).contiguous() if self.remap is not None: A = min_encoding_indices.reshape(z.shape[0] ,-1 ) # add batch axis A = self.remap_to_used(A_ ) A = min_encoding_indices.reshape(-1 ,1 ) # flatten if self.sane_index_shape: A = min_encoding_indices.reshape(z_q.shape[0] ,z_q.shape[2] ,z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : str ) -> Union[str, Any]: # shape specifying (batch, height, width, channel) if self.remap is not None: A = indices.reshape(shape[0] ,-1 ) # add batch axis A = self.unmap_to_all(A_ ) A = indices.reshape(-1 ) # flatten again # get quantized latent vectors A = self.embedding(A_ ) if shape is not None: A = z_q.view(A_ ) # reshape back to match original input shape A = z_q.permute(0 ,3 ,1 ,2 ).contiguous() return z_q class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : str ,A_ : Tuple ,A_ : Dict=False ) -> List[str]: A = parameters A , A = torch.chunk(A_ ,2 ,dim=1 ) A = torch.clamp(self.logvar ,-30.0 ,20.0 ) A = deterministic A = torch.exp(0.5 * self.logvar ) A = torch.exp(self.logvar ) if self.deterministic: A = A = torch.zeros_like( self.mean ,device=self.parameters.device ,dtype=self.parameters.dtype ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[torch.Generator] = None ) -> torch.FloatTensor: # make sure sample is on the same device as the parameters and has same dtype A = randn_tensor( self.mean.shape ,generator=A_ ,device=self.parameters.device ,dtype=self.parameters.dtype ) A = self.mean + self.std * sample return x def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple=None ) -> int: if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean ,2 ) + self.var - 1.0 - self.logvar ,dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean ,2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar ,dim=[1, 2, 3] 
,) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[str] ,A_ : Union[str, Any]=[1, 2, 3] ) -> List[str]: if self.deterministic: return torch.Tensor([0.0] ) A = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean ,2 ) / self.var ,dim=A_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: return self.mean
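The distribution class at the end of the file samples with the reparameterization trick, x = mean + std * eps; a standalone sketch of that step, with an illustrative latent shape (not from the source).

import torch

mean = torch.zeros(1, 4, 8, 8)                       # illustrative latent shape
logvar = torch.zeros(1, 4, 8, 8).clamp(-30.0, 20.0)  # same clamp range as above
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(mean)                         # stand-in for randn_tensor
sample = mean + std * eps                            # gradients flow through mean and std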
22
0
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[Any] = LEDTokenizer _lowerCamelCase: Optional[int] = LEDTokenizerFast _lowerCamelCase: Union[str, Any] = True def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: super().setUp() A = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] A = {'unk_token': '<unk>'} A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(A_ ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Dict ,**A_ : int ) -> str: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,**A_ : Any ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Optional[Any] ) -> Any: return "lower newer", "lower newer" @cached_property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: return LEDTokenizer.from_pretrained('allenai/led-base-16384' ) @cached_property def _SCREAMING_SNAKE_CASE ( self : str ) -> str: return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: A = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] A = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: A = tokenizer(A_ ,max_length=len(A_ ) ,padding=A_ ,return_tensors='pt' ) self.assertIsInstance(A_ ,A_ ) self.assertEqual((2, 9) ,batch.input_ids.shape ) self.assertEqual((2, 9) ,batch.attention_mask.shape ) A = batch.input_ids.tolist()[0] self.assertListEqual(A_ ,A_ ) @require_torch def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: A = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: A = tokenizer(A_ ,padding=A_ ,return_tensors='pt' ) self.assertIn('input_ids' ,A_ ) self.assertIn('attention_mask' ,A_ ) self.assertNotIn('labels' ,A_ ) self.assertNotIn('decoder_attention_mask' ,A_ ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: A = [ 'Summary of the text.', 'Another summary.', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: A = tokenizer(text_target=A_ ,max_length=32 ,padding='max_length' ,return_tensors='pt' ) self.assertEqual(32 ,targets['input_ids'].shape[1] ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: for tokenizer in [self.default_tokenizer, 
self.default_tokenizer_fast]: A = tokenizer( ['I am a small frog' * 1024, 'I am a small frog'] ,padding=A_ ,truncation=A_ ,return_tensors='pt' ) self.assertIsInstance(A_ ,A_ ) self.assertEqual(batch.input_ids.shape ,(2, 5122) ) @require_torch def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A = ['A long paragraph for summarization.'] A = [ 'Summary of the text.', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: A = tokenizer(A_ ,return_tensors='pt' ) A = tokenizer(text_target=A_ ,return_tensors='pt' ) A = inputs['input_ids'] A = targets['input_ids'] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: A = ['Summary of the text.', 'Another summary.'] A = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] A = tokenizer(A_ ,padding=A_ ) A = [[0] * len(A_ ) for x in encoded_output['input_ids']] A = tokenizer.pad(A_ ) self.assertSequenceEqual(outputs['global_attention_mask'] ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: pass def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): A = self.rust_tokenizer_class.from_pretrained(A_ ,**A_ ) A = self.tokenizer_class.from_pretrained(A_ ,**A_ ) A = 'A, <mask> AllenNLP sentence.' A = tokenizer_r.encode_plus(A_ ,add_special_tokens=A_ ,return_token_type_ids=A_ ) A = tokenizer_p.encode_plus(A_ ,add_special_tokens=A_ ,return_token_type_ids=A_ ) self.assertEqual(sum(tokens_r['token_type_ids'] ) ,sum(tokens_p['token_type_ids'] ) ) self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) ,sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) ,) A = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) A = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) self.assertSequenceEqual(tokens_p['input_ids'] ,[0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'] ,[0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual( A_ ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( A_ ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
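The last test above exercises LED-specific padding of `global_attention_mask`; a hedged sketch of the same round trip with the checkpoint the tests use, where padded positions receive -1.

from transformers import LEDTokenizerFast

tokenizer = LEDTokenizerFast.from_pretrained('allenai/led-base-16384')
enc = tokenizer(['Summary of the text.', 'Another summary.'], padding=False)
enc['global_attention_mask'] = [[0] * len(ids) for ids in enc['input_ids']]
padded = tokenizer.pad(enc)  # also pads global_attention_mask, filling with -1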
712
"""simple docstring""" def _snake_case ( snake_case__ : list , snake_case__ : list , snake_case__ : int ): A = len(snake_case__ ) A = [[0] * n for i in range(snake_case__ )] for i in range(snake_case__ ): A = y_points[i] for i in range(2 , snake_case__ ): for j in range(snake_case__ , snake_case__ ): A = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
22
0
import copy import json import os import tempfile from transformers import is_torch_available from .test_configuration_utils import config_common_kwargs class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : List[str] ,A_ : Union[str, Any] ,A_ : Any=None ,A_ : Optional[Any]=True ,A_ : List[str]=None ,**A_ : str ) -> str: A = parent A = config_class A = has_text_modality A = kwargs A = common_properties def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: A = self.config_class(**self.inputs_dict ) A = ( ['hidden_size', 'num_attention_heads', 'num_hidden_layers'] if self.common_properties is None else self.common_properties ) # Add common fields for text models if self.has_text_modality: common_properties.extend(['vocab_size'] ) # Test that config has the common properties as getters for prop in common_properties: self.parent.assertTrue(hasattr(A_ ,A_ ) ,msg=F'`{prop}` does not exist' ) # Test that config has the common properties as setter for idx, name in enumerate(A_ ): try: setattr(A_ ,A_ ,A_ ) self.parent.assertEqual( getattr(A_ ,A_ ) ,A_ ,msg=F'`{name} value {idx} expected, but was {getattr(A_ ,A_ )}' ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass # Test if config class can be called with Config(prop_name=..) for idx, name in enumerate(A_ ): try: A = self.config_class(**{name: idx} ) self.parent.assertEqual( getattr(A_ ,A_ ) ,A_ ,msg=F'`{name} value {idx} expected, but was {getattr(A_ ,A_ )}' ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: A = self.config_class(**self.inputs_dict ) A = json.loads(config.to_json_string() ) for key, value in self.inputs_dict.items(): self.parent.assertEqual(obj[key] ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: A = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A = os.path.join(A_ ,'config.json' ) config_first.to_json_file(A_ ) A = self.config_class.from_json_file(A_ ) self.parent.assertEqual(config_second.to_dict() ,config_first.to_dict() ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: A = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(A_ ) A = self.config_class.from_pretrained(A_ ) self.parent.assertEqual(config_second.to_dict() ,config_first.to_dict() ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: A = self.config_class(**self.inputs_dict ) A = 'test' with tempfile.TemporaryDirectory() as tmpdirname: A = os.path.join(A_ ,A_ ) config_first.save_pretrained(A_ ) A = self.config_class.from_pretrained(A_ ,subfolder=A_ ) self.parent.assertEqual(config_second.to_dict() ,config_first.to_dict() ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: A = self.config_class(**self.inputs_dict ,num_labels=5 ) self.parent.assertEqual(len(config.idalabel ) ,5 ) self.parent.assertEqual(len(config.labelaid ) ,5 ) A = 3 self.parent.assertEqual(len(config.idalabel ) ,3 ) self.parent.assertEqual(len(config.labelaid ) ,3 ) def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: if self.config_class.is_composition: return A = self.config_class() self.parent.assertIsNotNone(A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: A = copy.deepcopy(A_ ) A = self.config_class(**A_ ) A = [] for key, value in config_common_kwargs.items(): if 
key == "torch_dtype": if not is_torch_available(): continue else: import torch if config.torch_dtype != torch.floataa: wrong_values.append(('torch_dtype', config.torch_dtype, torch.floataa) ) elif getattr(A_ ,A_ ) != value: wrong_values.append((key, getattr(A_ ,A_ ), value) ) if len(A_ ) > 0: A = '\n'.join([F'- {v[0]}: got {v[1]} instead of {v[2]}' for v in wrong_values] ) raise ValueError(F'The following keys were not properly set in the config:\n{errors}' ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: self.create_and_test_config_common_properties() self.create_and_test_config_to_json_string() self.create_and_test_config_to_json_file() self.create_and_test_config_from_and_save_pretrained() self.create_and_test_config_from_and_save_pretrained_subfolder() self.create_and_test_config_with_num_labels() self.check_config_can_be_init_without_params() self.check_config_arguments_init()
713
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] ,A_ : Optional[Any] ,A_ : Optional[int]=2 ,A_ : Any=True ,A_ : List[str]=False ,A_ : Tuple=10 ,A_ : List[Any]=3 ,A_ : Any=32 * 8 ,A_ : Dict=32 * 8 ,A_ : List[Any]=4 ,A_ : Tuple=64 ,) -> List[str]: A = parent A = batch_size A = is_training A = use_auxiliary_loss A = num_queries A = num_channels A = min_size A = max_size A = num_labels A = hidden_dim A = hidden_dim def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]: A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( A_ ) A = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=A_ ) A = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=A_ ) > 0.5 ).float() A = (torch.rand((self.batch_size, self.num_labels) ,device=A_ ) > 0.5).long() A = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]: A = MaskaFormerConfig( hidden_size=self.hidden_dim ,) A = self.num_queries A = self.num_labels A = [1, 1, 1, 1] A = self.num_channels A = 64 A = 128 A = self.hidden_dim A = self.hidden_dim A = self.hidden_dim return config def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A , A , A , A , A = self.prepare_config_and_inputs() A = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask} return config, inputs_dict def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]: A = output.encoder_hidden_states A = output.pixel_decoder_hidden_states A = output.transformer_decoder_hidden_states self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(A_ ) ,config.decoder_layers ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : List[str] ,A_ : Union[str, Any]=False ) -> str: with torch.no_grad(): A = MaskaFormerModel(config=A_ ) model.to(A_ ) model.eval() A = model(pixel_values=A_ ,pixel_mask=A_ ) A = model(A_ ,output_hidden_states=A_ ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : Any ,A_ : Dict ,A_ : Any ,A_ : Dict ) -> Optional[Any]: A = MaskaFormerForUniversalSegmentation(config=A_ ) model.to(A_ ) model.eval() def 
comm_check_on_output(A_ : str ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): A = model(pixel_values=A_ ,pixel_mask=A_ ) A = model(A_ ) comm_check_on_output(A_ ) A = model( pixel_values=A_ ,pixel_mask=A_ ,mask_labels=A_ ,class_labels=A_ ) comm_check_on_output(A_ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) ) @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Union[str, Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () _lowerCamelCase: Optional[Any] = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {} _lowerCamelCase: int = False _lowerCamelCase: Dict = False _lowerCamelCase: List[str] = False _lowerCamelCase: int = False def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict: A = MaskaFormerModelTester(self ) A = ConfigTester(self ,config_class=A_ ,has_text_modality=A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A , A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*A_ ) @unittest.skip(reason='Mask2Former does not use inputs_embeds' ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: pass @unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: pass @unittest.skip(reason='Mask2Former is not a generative model' ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: pass @unittest.skip(reason='Mask2Former does not use token embeddings' ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: pass @require_torch_multi_gpu @unittest.skip( reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: pass def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ) A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A = [*signature.parameters.keys()] A = ['pixel_values'] self.assertListEqual(arg_names[:1] ,A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: for model_name in ["facebook/mask2former-swin-small-coco-instance"]: A = MaskaFormerModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = (self.model_tester.min_size,) * 2 A = { 'pixel_values': torch.randn((2, 3, *size) ,device=A_ ), 'mask_labels': torch.randn((2, 10, *size) ,device=A_ ), 'class_labels': torch.zeros(2 ,10 ,device=A_ ).long(), } A = self.model_tester.get_config() A = MaskaFormerForUniversalSegmentation(A_ ).to(A_ ) A = model(**A_ ) self.assertTrue(outputs.loss is not None ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ).to(A_ ) A = model(**A_ ,output_attentions=A_ ) self.assertTrue(outputs.attentions is not None ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: if not self.model_tester.is_training: return A = self.all_model_classes[1] A , A , A , A , A = self.model_tester.prepare_config_and_inputs() A = model_class(A_ ) model.to(A_ ) model.train() A = model(A_ ,mask_labels=A_ ,class_labels=A_ ).loss loss.backward() def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = self.all_model_classes[1] A , A , A , A , A = self.model_tester.prepare_config_and_inputs() A = True A = True A = model_class(A_ ).to(A_ ) model.train() A = model(A_ ,mask_labels=A_ ,class_labels=A_ ) A = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() A = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() A = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() A = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=A_ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) _lowercase = 1e-4 def _snake_case ( ): A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_vision @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: return "facebook/mask2former-swin-small-coco-instance" @cached_property def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: A = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(A_ ) A = self.default_image_processor A = prepare_img() A = image_processor(A_ ,return_tensors='pt' ).to(A_ ) A = inputs['pixel_values'].shape # check size is divisible by 32 
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(A_ ,(1, 3, 384, 384) ) with torch.no_grad(): A = model(**A_ ) A = torch.tensor( [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(A_ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) ) A = torch.tensor( [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(A_ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) ) A = torch.tensor( [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(A_ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,A_ ,atol=A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval() A = self.default_image_processor A = prepare_img() A = image_processor(A_ ,return_tensors='pt' ).to(A_ ) A = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(A_ ,(1, 3, 384, 384) ) with torch.no_grad(): A = model(**A_ ) # masks_queries_logits A = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) A = [ [-8.78_39, -9.00_56, -8.81_21], [-7.41_04, -7.03_13, -6.54_01], [-6.61_05, -6.34_27, -6.46_75], ] A = torch.tensor(A_ ).to(A_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,A_ ,atol=A_ ) ) # class_queries_logits A = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) ) A = torch.tensor( [ [1.83_24, -8.08_35, -4.19_22], [0.84_50, -9.00_50, -3.60_53], [0.30_45, -7.72_93, -3.02_75], ] ).to(A_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,A_ ,atol=A_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval() A = self.default_image_processor A = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors='pt' ,) A = inputs['pixel_values'].to(A_ ) A = [el.to(A_ ) for el in inputs['mask_labels']] A = [el.to(A_ ) for el in inputs['class_labels']] with torch.no_grad(): A = model(**A_ ) self.assertTrue(outputs.loss is not None )
22
0
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def _snake_case ( snake_case__ : int ): A = SwinvaConfig() A = swinva_name.split('_' ) A = name_split[1] if "to" in name_split[3]: A = int(name_split[3][-3:] ) else: A = int(name_split[3] ) if "to" in name_split[2]: A = int(name_split[2][-2:] ) else: A = int(name_split[2][6:] ) if model_size == "tiny": A = 96 A = (2, 2, 6, 2) A = (3, 6, 12, 24) elif model_size == "small": A = 96 A = (2, 2, 18, 2) A = (3, 6, 12, 24) elif model_size == "base": A = 128 A = (2, 2, 18, 2) A = (4, 8, 16, 32) else: A = 192 A = (2, 2, 18, 2) A = (6, 12, 24, 48) if "to" in swinva_name: A = (12, 12, 12, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): A = 2_1841 A = 'huggingface/label-files' A = 'imagenet-22k-id2label.json' A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) ) A = {int(snake_case__ ): v for k, v in idalabel.items()} A = idalabel A = {v: k for k, v in idalabel.items()} else: A = 1000 A = 'huggingface/label-files' A = 'imagenet-1k-id2label.json' A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) ) A = {int(snake_case__ ): v for k, v in idalabel.items()} A = idalabel A = {v: k for k, v in idalabel.items()} A = img_size A = num_classes A = embed_dim A = depths A = num_heads A = window_size return config def _snake_case ( snake_case__ : List[Any] ): if "patch_embed.proj" in name: A = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: A = name.replace('patch_embed.norm' , 'embeddings.norm' ) if "layers" in name: A = 'encoder.' + name if "attn.proj" in name: A = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: A = name.replace('attn' , 'attention.self' ) if "norm1" in name: A = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: A = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: A = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: A = name.replace('mlp.fc2' , 'output.dense' ) if "q_bias" in name: A = name.replace('q_bias' , 'query.bias' ) if "k_bias" in name: A = name.replace('k_bias' , 'key.bias' ) if "v_bias" in name: A = name.replace('v_bias' , 'value.bias' ) if "cpb_mlp" in name: A = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' ) if name == "norm.weight": A = 'layernorm.weight' if name == "norm.bias": A = 'layernorm.bias' if "head" in name: A = name.replace('head' , 'classifier' ) else: A = 'swinv2.' + name return name def _snake_case ( snake_case__ : List[Any] , snake_case__ : List[Any] ): for key in orig_state_dict.copy().keys(): A = orig_state_dict.pop(snake_case__ ) if "mask" in key: continue elif "qkv" in key: A = key.split('.' 
) A = int(key_split[1] ) A = int(key_split[3] ) A = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: A = val[:dim, :] A = val[dim : dim * 2, :] A = val[-dim:, :] else: A = val[:dim] A = val[ dim : dim * 2 ] A = val[-dim:] else: A = val return orig_state_dict def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Tuple ): A = timm.create_model(snake_case__ , pretrained=snake_case__ ) timm_model.eval() A = get_swinva_config(snake_case__ ) A = SwinvaForImageClassification(snake_case__ ) model.eval() A = convert_state_dict(timm_model.state_dict() , snake_case__ ) model.load_state_dict(snake_case__ ) A = 'http://images.cocodataset.org/val2017/000000039769.jpg' A = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swinva_name.replace('_' , '-' ) ) ) A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) A = image_processor(images=snake_case__ , return_tensors='pt' ) A = timm_model(inputs['pixel_values'] ) A = model(**snake_case__ ).logits assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) print(F'Saving model {swinva_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case__ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(snake_case__ ) model.push_to_hub( repo_path_or_name=Path(snake_case__ , snake_case__ ) , organization='nandwalritik' , commit_message='Add model' , ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swinv2_name''', default='''swinv2_tiny_patch4_window8_256''', type=str, help='''Name of the Swinv2 timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) _lowercase = parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
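# Usage sketch for the conversion script above (the script's file name is assumed
# here; the checkpoint name is the argparse default, the output path a placeholder):
#
#   python convert_swinv2_timm_to_pytorch.py \
#       --swinv2_name swinv2_tiny_patch4_window8_256 \
#       --pytorch_dump_folder_path ./swinv2-tiny-converted
#
# It remaps the timm state dict onto the Hugging Face Swinv2 layout, asserts the
# logits of both models agree within 1e-3, then saves the model and image processor.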
714
"""simple docstring""" from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] ,A_ : Optional[Any] ,) -> Optional[int]: A = parent A = 13 A = 7 A = True A = True A = True A = 99 A = 32 A = 2 A = 4 A = 37 A = 'gelu' A = 0.1 A = 0.1 A = 512 A = 16 A = 2 A = 0.02 A = 3 A = 4 A = None def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = None if self.use_input_mask: A = random_attention_mask([self.batch_size, self.seq_length] ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A = ids_tensor([self.batch_size] ,self.num_choices ) A = EsmConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,pad_token_id=1 ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = self.prepare_config_and_inputs() A = True A = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : List[Any] ,A_ : Any ,A_ : Any ) -> Dict: A = TFEsmModel(config=A_ ) A = {'input_ids': input_ids, 'attention_mask': input_mask} A = model(A_ ) A = [input_ids, input_mask] A = model(A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : List[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,) -> Optional[int]: A = True A = TFEsmModel(config=A_ ) A = { 'input_ids': input_ids, 'attention_mask': input_mask, 'encoder_hidden_states': encoder_hidden_states, 'encoder_attention_mask': encoder_attention_mask, } A = model(A_ ) A = [input_ids, input_mask] A = model(A_ ,encoder_hidden_states=A_ ) # Also check the case where encoder outputs are not passed A = model(A_ ,attention_mask=A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[Any] 
,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[Any] ) -> Dict: A = TFEsmForMaskedLM(config=A_ ) A = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[Any] ,A_ : int ,A_ : Tuple ,A_ : Optional[int] ) -> Union[str, Any]: A = self.num_labels A = TFEsmForTokenClassification(config=A_ ) A = {'input_ids': input_ids, 'attention_mask': input_mask} A = model(A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) _lowerCamelCase: List[str] = ( { '''feature-extraction''': TFEsmModel, '''fill-mask''': TFEsmForMaskedLM, '''text-classification''': TFEsmForSequenceClassification, '''token-classification''': TFEsmForTokenClassification, '''zero-shot''': TFEsmForSequenceClassification, } if is_tf_available() else {} ) _lowerCamelCase: Union[str, Any] = False _lowerCamelCase: List[Any] = False def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: A = TFEsmModelTester(self ) A = ConfigTester(self ,config_class=A_ ,hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = TFEsmModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) @unittest.skip('Protein models do not support embedding resizing.' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: pass @unittest.skip('Protein models do not support embedding resizing.' 
) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any: pass def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ) assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer A = model.get_bias() assert isinstance(A_ ,A_ ) for k, v in name.items(): assert isinstance(A_ ,tf.Variable ) else: A = model.get_output_embeddings() assert x is None A = model.get_bias() assert name is None @require_tf class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) A = tf.constant([[0, 1, 2, 3, 4, 5]] ) A = model(A_ )[0] A = [1, 6, 33] self.assertEqual(list(output.numpy().shape ) ,A_ ) # compare the actual values for a slice. A = tf.constant( [ [ [8.92_15_18, -10.58_98_14, -6.4_67_13_07], [-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15], [-7.78_12_47, -13.95_15_57, -3.74_05_92], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-2 ) ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: A = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) A = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) A = model(A_ )[0] # compare the actual values for a slice. A = tf.constant( [ [ [0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39], [0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22], [0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
22
0
"""simple docstring""" import string # frequency taken from https://en.wikipedia.org/wiki/Letter_frequency _lowercase = { '''E''': 12.70, '''T''': 9.06, '''A''': 8.17, '''O''': 7.51, '''I''': 6.97, '''N''': 6.75, '''S''': 6.33, '''H''': 6.09, '''R''': 5.99, '''D''': 4.25, '''L''': 4.03, '''C''': 2.78, '''U''': 2.76, '''M''': 2.41, '''W''': 2.36, '''F''': 2.23, '''G''': 2.02, '''Y''': 1.97, '''P''': 1.93, '''B''': 1.29, '''V''': 0.98, '''K''': 0.77, '''J''': 0.15, '''X''': 0.15, '''Q''': 0.10, '''Z''': 0.07, } _lowercase = '''ETAOINSHRDLCUMWFGYPBVKJXQZ''' _lowercase = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ''' def _snake_case ( snake_case__ : str ): A = {letter: 0 for letter in string.ascii_uppercase} for letter in message.upper(): if letter in LETTERS: letter_count[letter] += 1 return letter_count def _snake_case ( snake_case__ : tuple ): return x[0] def _snake_case ( snake_case__ : str ): A = get_letter_count(snake_case__ ) A = { freq: [] for letter, freq in letter_to_freq.items() } for letter in LETTERS: freq_to_letter[letter_to_freq[letter]].append(snake_case__ ) A = {} for freq in freq_to_letter: freq_to_letter[freq].sort(key=ETAOIN.find , reverse=snake_case__ ) A = ''.join(freq_to_letter[freq] ) A = list(freq_to_letter_str.items() ) freq_pairs.sort(key=snake_case__ , reverse=snake_case__ ) A = [freq_pair[1] for freq_pair in freq_pairs] return "".join(snake_case__ ) def _snake_case ( snake_case__ : str ): A = get_frequency_order(snake_case__ ) A = 0 for common_letter in ETAOIN[:6]: if common_letter in freq_order[:6]: match_score += 1 for uncommon_letter in ETAOIN[-6:]: if uncommon_letter in freq_order[-6:]: match_score += 1 return match_score if __name__ == "__main__": import doctest doctest.testmod()
715
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys _lowercase = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''') _lowercase = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('''utf-8''').split() _lowercase = '''|'''.join(sys.argv[1:]) _lowercase = re.compile(rF"""^({joined_dirs}).*?\.py$""") _lowercase = [x for x in modified_files if regex.match(x)] print(''' '''.join(relevant_modified_files), end='''''')
22
0
"""simple docstring""" def _snake_case ( snake_case__ : int ): A = abs(snake_case__ ) A = 0 while n > 0: res += n % 10 n //= 10 return res def _snake_case ( snake_case__ : int ): A = abs(snake_case__ ) return n if n < 10 else n % 10 + sum_of_digits(n // 10 ) def _snake_case ( snake_case__ : int ): return sum(int(snake_case__ ) for c in str(abs(snake_case__ ) ) ) def _snake_case ( ): from collections.abc import Callable from timeit import timeit def benchmark_a_function(snake_case__ : Callable , snake_case__ : int ) -> None: A = F'{func.__name__}({value})' A = timeit(F'__main__.{call}' , setup='import __main__' ) print(F'{call:56} = {func(snake_case__ )} -- {timing:.4f} seconds' ) for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376): for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact): benchmark_a_function(snake_case__ , snake_case__ ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
716
"""simple docstring""" import sys from collections import defaultdict class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] ) -> int: A = [] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : int ) -> Optional[int]: return self.node_position[vertex] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[Any] ,A_ : Any ) -> List[Any]: A = pos def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : str ,A_ : Dict ,A_ : List[str] ) -> str: if start > size // 2 - 1: return else: if 2 * start + 2 >= size: A = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: A = 2 * start + 1 else: A = 2 * start + 2 if heap[smallest_child] < heap[start]: A , A = heap[smallest_child], positions[smallest_child] A , A = ( heap[start], positions[start], ) A , A = temp, tempa A = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] ,self.get_position(positions[start] ) ) self.set_position(positions[start] ,A_ ) self.top_to_bottom(A_ ,A_ ,A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Optional[int] ,A_ : Dict ,A_ : str ,A_ : Union[str, Any] ) -> Dict: A = position[index] while index != 0: A = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: A = heap[parent] A = position[parent] self.set_position(position[parent] ,A_ ) else: A = val A = temp self.set_position(A_ ,A_ ) break A = parent else: A = val A = temp self.set_position(A_ ,0 ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple ,A_ : Dict ) -> Union[str, Any]: A = len(A_ ) // 2 - 1 for i in range(A_ ,-1 ,-1 ): self.top_to_bottom(A_ ,A_ ,len(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ,A_ : Dict ) -> Union[str, Any]: A = positions[0] A = sys.maxsize self.top_to_bottom(A_ ,0 ,len(A_ ) ,A_ ) return temp def _snake_case ( snake_case__ : Dict ): A = Heap() A = [0] * len(snake_case__ ) A = [-1] * len(snake_case__ ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph A = [] # Heap of Distance of vertices from their neighboring vertex A = [] for vertex in range(len(snake_case__ ) ): distance_tv.append(sys.maxsize ) positions.append(snake_case__ ) heap.node_position.append(snake_case__ ) A = [] A = 1 A = sys.maxsize for neighbor, distance in adjacency_list[0]: A = 0 A = distance heap.heapify(snake_case__ , snake_case__ ) for _ in range(1 , len(snake_case__ ) ): A = heap.delete_minimum(snake_case__ , snake_case__ ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) A = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(snake_case__ )] ): A = distance heap.bottom_to_top( snake_case__ , heap.get_position(snake_case__ ) , snake_case__ , snake_case__ ) A = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > _lowercase = int(input('''Enter number of edges: ''').strip()) _lowercase = defaultdict(list) for _ in range(edges_number): _lowercase = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
22
0
"""simple docstring""" import logging import os from .state import PartialState class lowerCAmelCase_ ( logging.LoggerAdapter ): '''simple docstring''' @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : List[str] ) -> Optional[int]: A = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Any ,A_ : Dict ,*A_ : int ,**A_ : Dict ) -> Optional[int]: if PartialState._shared_state == {}: raise RuntimeError( 'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' ) A = kwargs.pop('main_process_only' ,A_ ) A = kwargs.pop('in_order' ,A_ ) if self.isEnabledFor(A_ ): if self._should_log(A_ ): A , A = self.process(A_ ,A_ ) self.logger.log(A_ ,A_ ,*A_ ,**A_ ) elif in_order: A = PartialState() for i in range(state.num_processes ): if i == state.process_index: A , A = self.process(A_ ,A_ ) self.logger.log(A_ ,A_ ,*A_ ,**A_ ) state.wait_for_everyone() def _snake_case ( snake_case__ : str , snake_case__ : str = None ): if log_level is None: A = os.environ.get('ACCELERATE_LOG_LEVEL' , snake_case__ ) A = logging.getLogger(snake_case__ ) if log_level is not None: logger.setLevel(log_level.upper() ) logger.root.setLevel(log_level.upper() ) return MultiProcessAdapter(snake_case__ , {} )
717
"""simple docstring""" import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter _lowercase = True except ImportError: _lowercase = False _lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name def _snake_case ( snake_case__ : Namespace ): return AddNewModelCommand(args.testing , args.testing_file , path=args.path ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any: A = parser.add_parser('add-new-model' ) add_new_model_parser.add_argument('--testing' ,action='store_true' ,help='If in testing mode.' ) add_new_model_parser.add_argument('--testing_file' ,type=A_ ,help='Configuration file on which to run.' ) add_new_model_parser.add_argument( '--path' ,type=A_ ,help='Path to cookiecutter. Should only be used for testing purposes.' ) add_new_model_parser.set_defaults(func=A_ ) def __init__( self : Tuple ,A_ : bool ,A_ : str ,A_ : Tuple=None ,*A_ : List[str] ) -> Union[str, Any]: A = testing A = testing_file A = path def _SCREAMING_SNAKE_CASE ( self : int ) -> int: warnings.warn( 'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. ' 'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality ' 'checks, you should use `transformers-cli add-new-model-like` instead.' ) if not _has_cookiecutter: raise ImportError( 'Model creation dependencies are required to use the `add_new_model` command. Install them by running ' 'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' ) # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory A = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]] if len(A_ ) > 0: raise ValueError( 'Several directories starting with `cookiecutter-template-` in current working directory. ' 'Please clean your directory by removing all folders starting with `cookiecutter-template-` or ' 'change your working directory.' 
) A = ( Path(A_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent ) A = path_to_transformer_root / 'templates' / 'adding_a_new_model' # Execute cookiecutter if not self._testing: cookiecutter(str(A_ ) ) else: with open(self._testing_file ,'r' ) as configuration_file: A = json.load(A_ ) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=A_ ,extra_context=A_ ,) A = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0] # Retrieve configuration with open(directory + '/configuration.json' ,'r' ) as configuration_file: A = json.load(A_ ) A = configuration['lowercase_modelname'] A = configuration['generate_tensorflow_pytorch_and_flax'] os.remove(F'{directory}/configuration.json' ) A = 'PyTorch' in generate_tensorflow_pytorch_and_flax A = 'TensorFlow' in generate_tensorflow_pytorch_and_flax A = 'Flax' in generate_tensorflow_pytorch_and_flax A = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}' os.makedirs(A_ ,exist_ok=A_ ) os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=A_ ) # Tests require submodules as they have parent imports with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,'w' ): pass shutil.move( F'{directory}/__init__.py' ,F'{model_dir}/__init__.py' ,) shutil.move( F'{directory}/configuration_{lowercase_model_name}.py' ,F'{model_dir}/configuration_{lowercase_model_name}.py' ,) def remove_copy_lines(A_ : int ): with open(A_ ,'r' ) as f: A = f.readlines() with open(A_ ,'w' ) as f: for line in lines: if "# Copied from transformers." not in line: f.write(A_ ) if output_pytorch: if not self._testing: remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' ) shutil.move( F'{directory}/modeling_{lowercase_model_name}.py' ,F'{model_dir}/modeling_{lowercase_model_name}.py' ,) shutil.move( F'{directory}/test_modeling_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,) else: os.remove(F'{directory}/modeling_{lowercase_model_name}.py' ) os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' ) if output_tensorflow: if not self._testing: remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' ) shutil.move( F'{directory}/modeling_tf_{lowercase_model_name}.py' ,F'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,) shutil.move( F'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,) else: os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' ) os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' ) if output_flax: if not self._testing: remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' ) shutil.move( F'{directory}/modeling_flax_{lowercase_model_name}.py' ,F'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,) shutil.move( F'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,) else: os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' ) os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' ) shutil.move( F'{directory}/{lowercase_model_name}.md' ,F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,) shutil.move( 
F'{directory}/tokenization_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}.py' ,) shutil.move( F'{directory}/tokenization_fast_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(A_ : str ,A_ : str ,A_ : List[str] ): # Create temp file A , A = mkstemp() A = False with fdopen(A_ ,'w' ) as new_file: with open(A_ ) as old_file: for line in old_file: new_file.write(A_ ) if line_to_copy_below in line: A = True for line_to_copy in lines_to_copy: new_file.write(A_ ) if not line_found: raise ValueError(F'Line {line_to_copy_below} was not found in file.' ) # Copy the file permissions from the old file to the new file copymode(A_ ,A_ ) # Remove original file remove(A_ ) # Move new file move(A_ ,A_ ) def skip_units(A_ : Dict ): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(A_ : Tuple ): with open(A_ ) as datafile: A = [] A = False A = False for line in datafile: if "# To replace in: " in line and "##" not in line: A = line.split('"' )[1] A = skip_units(A_ ) elif "# Below: " in line and "##" not in line: A = line.split('"' )[1] A = skip_units(A_ ) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(A_ ,A_ ,A_ ) A = [] elif "# Replace with" in line and "##" not in line: A = [] elif "##" not in line: lines_to_copy.append(A_ ) remove(A_ ) replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' ) os.rmdir(A_ )
22
0
"""simple docstring""" import argparse import copy def _snake_case ( snake_case__ : List[Any] ): A = {} with open(snake_case__ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: A = [] _list.append([line.split()[1], line.split()[2]] ) A = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: A = [] _list.append([line.split()[0], line.split()[2]] ) A = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def _snake_case ( snake_case__ : str , snake_case__ : Any ): with open(snake_case__ ) as f: A = f.read(1 ) A = start_node A = [] A = start_node A = 0 while visiting not in first_solution: A = 1_0000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(snake_case__ ) and k[0] not in first_solution: A = k[1] A = k[0] first_solution.append(snake_case__ ) A = distance_of_first_solution + int(snake_case__ ) A = best_node first_solution.append(snake_case__ ) A = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 A = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_0000 ) return first_solution, distance_of_first_solution def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Tuple ): A = [] for n in solution[1:-1]: A = solution.index(snake_case__ ) for kn in solution[1:-1]: A = solution.index(snake_case__ ) if n == kn: continue A = copy.deepcopy(snake_case__ ) A = kn A = n A = 0 for k in _tmp[:-1]: A = _tmp[_tmp.index(snake_case__ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: A = distance + int(i[1] ) _tmp.append(snake_case__ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) A = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda snake_case__ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def _snake_case ( snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : List[str] ): A = 1 A = first_solution A = [] A = distance_of_first_solution A = solution while count <= iters: A = find_neighborhood(snake_case__ , snake_case__ ) A = 0 A = neighborhood[index_of_best_solution] A = len(snake_case__ ) - 1 A = False while not found: A = 0 while i < len(snake_case__ ): if best_solution[i] != solution[i]: A = best_solution[i] A = solution[i] break A = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) A = True A = best_solution[:-1] A = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: A = cost A = solution else: A = index_of_best_solution + 1 A = neighborhood[index_of_best_solution] if len(snake_case__ ) >= size: tabu_list.pop(0 ) A = count + 1 return best_solution_ever, best_cost def _snake_case ( snake_case__ : List[str]=None ): A = generate_neighbours(args.File ) A , A = generate_first_solution( args.File , snake_case__ ) A , A = tabu_search( snake_case__ , snake_case__ , snake_case__ , args.Iterations , args.Size , ) print(F'Best solution: {best_sol}, with total distance: {best_cost}.' 
) if __name__ == "__main__": _lowercase = argparse.ArgumentParser(description='''Tabu Search''') parser.add_argument( '''-f''', '''--File''', type=str, help='''Path to the file containing the data''', required=True, ) parser.add_argument( '''-i''', '''--Iterations''', type=int, help='''How many iterations the algorithm should perform''', required=True, ) parser.add_argument( '''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True ) # Pass the arguments to main method main(parser.parse_args())
718
"""simple docstring""" import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : int ,A_ : Tuple ,A_ : str=7 ,A_ : Tuple=3 ,A_ : List[Any]=18 ,A_ : List[str]=30 ,A_ : Optional[Any]=400 ,A_ : Any=True ,A_ : Optional[Any]=None ,A_ : List[str]=True ,) -> str: A = size if size is not None else {'height': 18, 'width': 18} A = parent A = batch_size A = num_channels A = image_size A = min_resolution A = max_resolution A = do_resize A = size A = do_normalize def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04], [-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[Any] = ImageGPTImageProcessor if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = ImageGPTImageProcessingTester(self ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ ,'clusters' ) ) self.assertTrue(hasattr(A_ ,'do_resize' ) ) self.assertTrue(hasattr(A_ ,'size' ) ) self.assertTrue(hasattr(A_ ,'do_normalize' ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} ) A = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ) self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: A = self.image_processing_class(**self.image_processor_dict ) A = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(A_ ,obj[key] ) ) else: self.assertEqual(obj[key] ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: A = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A = os.path.join(A_ ,'image_processor.json' ) image_processor_first.to_json_file(A_ ) A = self.image_processing_class.from_json_file(A_ ).to_dict() A = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: A = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(A_ ) A = 
self.image_processing_class.from_pretrained(A_ ).to_dict() A = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] ,A_ ) @unittest.skip('ImageGPT requires clusters at initialization' ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: pass def _snake_case ( ): A = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' ) A = Image.open(dataset[4]['file'] ) A = Image.open(dataset[5]['file'] ) A = [imagea, imagea] return images @require_vision @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : str ) -> int: A = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' ) A = prepare_images() # test non-batched A = image_processing(images[0] ,return_tensors='pt' ) self.assertIsInstance(encoding.input_ids ,torch.LongTensor ) self.assertEqual(encoding.input_ids.shape ,(1, 1024) ) A = [306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist() ,A_ ) # test batched A = image_processing(A_ ,return_tensors='pt' ) self.assertIsInstance(encoding.input_ids ,torch.LongTensor ) self.assertEqual(encoding.input_ids.shape ,(2, 1024) ) A = [303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() ,A_ )
22
0
"""simple docstring""" from __future__ import annotations def _snake_case ( snake_case__ : list[int] ): if len(snake_case__ ) == 0: return array A , A = min(snake_case__ ), max(snake_case__ ) # Compute the variables A = _max - _min + 1 A , A = [0] * holes_range, [0] * holes_range # Make the sorting. for i in array: A = i - _min A = i holes_repeat[index] += 1 # Makes the array back by replacing the numbers. A = 0 for i in range(snake_case__ ): while holes_repeat[i] > 0: A = holes[i] index += 1 holes_repeat[i] -= 1 # Returns the sorted array. return array if __name__ == "__main__": import doctest doctest.testmod() _lowercase = input('''Enter numbers separated by comma:\n''') _lowercase = [int(x) for x in user_input.split(''',''')] print(pigeon_sort(unsorted))
719
"""simple docstring""" from argparse import ArgumentParser from . import BaseTransformersCLICommand def _snake_case ( snake_case__ : Optional[int] ): return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any: A = parser.add_parser('download' ) download_parser.add_argument( '--cache-dir' ,type=A_ ,default=A_ ,help='Path to location to store the models' ) download_parser.add_argument( '--force' ,action='store_true' ,help='Force the model to be download even if already in cache-dir' ) download_parser.add_argument( '--trust-remote-code' ,action='store_true' ,help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' ,) download_parser.add_argument('model' ,type=A_ ,help='Name of the model to download' ) download_parser.set_defaults(func=A_ ) def __init__( self : Dict ,A_ : str ,A_ : str ,A_ : bool ,A_ : bool ) -> Union[str, Any]: A = model A = cache A = force A = trust_remote_code def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: from ..models.auto import AutoModel, AutoTokenizer AutoModel.from_pretrained( self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code ) AutoTokenizer.from_pretrained( self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
22
0
"""simple docstring""" from argparse import ArgumentParser from .env import EnvironmentCommand def _snake_case ( ): A = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' ) A = parser.add_subparsers(help='diffusers-cli command helpers' ) # Register commands EnvironmentCommand.register_subcommand(snake_case__ ) # Let's go A = parser.parse_args() if not hasattr(snake_case__ , 'func' ): parser.print_help() exit(1 ) # Run A = args.func(snake_case__ ) service.run() if __name__ == "__main__": main()
720
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = {'''vocab_file''': '''spm_char.model'''} _lowercase = { '''vocab_file''': { '''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''', '''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''', '''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''', } } _lowercase = { '''microsoft/speecht5_asr''': 10_24, '''microsoft/speecht5_tts''': 10_24, '''microsoft/speecht5_vc''': 10_24, } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = VOCAB_FILES_NAMES _lowerCamelCase: List[Any] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase: Tuple = ['''input_ids''', '''attention_mask'''] def __init__( self : List[str] ,A_ : int ,A_ : List[str]="<s>" ,A_ : Optional[Any]="</s>" ,A_ : Optional[Any]="<unk>" ,A_ : str="<pad>" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : List[str] ,) -> None: A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,pad_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,) A = vocab_file A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A_ ) @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: return self.sp_model.get_piece_size() def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : str ) -> Any: A = self.__dict__.copy() A = None return state def __setstate__( self : Optional[int] ,A_ : str ) -> Tuple: A = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): A = {} A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[str]: return self.sp_model.encode(A_ ,out_type=A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any] ) -> Union[str, Any]: return self.sp_model.piece_to_id(A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict ) -> List[Any]: A = self.sp_model.IdToPiece(A_ ) return token def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Optional[Any] ) -> List[str]: A = [] A = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(A_ ) + token A = [] else: current_sub_tokens.append(A_ ) out_string += self.sp_model.decode(A_ ) return out_string.strip() def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : Optional[int]=None ) -> List[int]: if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ ) A = [1] if token_ids_a is None: return ([0] * 
len(A_ )) + suffix_ones return ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(A_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A = os.path.join( A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ ,'wb' ) as fi: A = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,)
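# Usage sketch, assuming this class is exposed as transformers' SpeechT5Tokenizer
# (the checkpoint name comes from the pretrained vocab map above):
#
#   from transformers import SpeechT5Tokenizer
#   tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
#   ids = tokenizer("Hello world").input_ids  # char-level pieces, with </s> appended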
22
0
"""simple docstring""" import re def _snake_case ( snake_case__ : str ): return [char.split() for char in re.split(r'[^ a-z A-Z 0-9 \s]' , str_ )] def _snake_case ( snake_case__ : str ): A = split_input(str_ ) return "".join( [''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] ) def _snake_case ( snake_case__ : str , snake_case__ : bool , snake_case__ : str ): try: A = split_input(snake_case__ ) if upper: A = ''.join( [ separator.join([char.upper() for char in sub_str] ) for sub_str in string_split ] ) else: A = ''.join( [ separator.join([char.lower() for char in sub_str] ) for sub_str in string_split ] ) return res_str except IndexError: return "not valid string" def _snake_case ( snake_case__ : str ): return to_simple_case(snake_case__ ) def _snake_case ( snake_case__ : str ): try: A = to_simple_case(snake_case__ ) return res_str[0].lower() + res_str[1:] except IndexError: return "not valid string" def _snake_case ( snake_case__ : str , snake_case__ : bool ): return to_complex_case(snake_case__ , snake_case__ , '_' ) def _snake_case ( snake_case__ : str , snake_case__ : bool ): return to_complex_case(snake_case__ , snake_case__ , '-' ) if __name__ == "__main__": __import__('''doctest''').testmod()
721
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _lowercase = { '''configuration_clip''': [ '''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPConfig''', '''CLIPOnnxConfig''', '''CLIPTextConfig''', '''CLIPVisionConfig''', ], '''processing_clip''': ['''CLIPProcessor'''], '''tokenization_clip''': ['''CLIPTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''CLIPTokenizerFast'''] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''CLIPFeatureExtractor'''] _lowercase = ['''CLIPImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPModel''', '''CLIPPreTrainedModel''', '''CLIPTextModel''', '''CLIPTextModelWithProjection''', '''CLIPVisionModel''', '''CLIPVisionModelWithProjection''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFCLIPModel''', '''TFCLIPPreTrainedModel''', '''TFCLIPTextModel''', '''TFCLIPVisionModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''FlaxCLIPModel''', '''FlaxCLIPPreTrainedModel''', '''FlaxCLIPTextModel''', '''FlaxCLIPTextPreTrainedModel''', '''FlaxCLIPVisionModel''', '''FlaxCLIPVisionPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
0
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler _lowercase = 16 _lowercase = 32 def _snake_case ( snake_case__ : Accelerator , snake_case__ : int = 16 , snake_case__ : str = "bert-base-cased" ): A = AutoTokenizer.from_pretrained(snake_case__ ) A = load_dataset('glue' , 'mrpc' ) def tokenize_function(snake_case__ : Tuple ): # max_length=None => use the model max length (it's actually the default) A = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case__ , max_length=snake_case__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset A = datasets.map( snake_case__ , batched=snake_case__ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=snake_case__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(snake_case__ : List[str] ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(snake_case__ , padding='max_length' , max_length=128 , return_tensors='pt' ) return tokenizer.pad(snake_case__ , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. A = DataLoader( tokenized_datasets['train'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) A = DataLoader( tokenized_datasets['validation'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) return train_dataloader, eval_dataloader def _snake_case ( snake_case__ : List[Any] , snake_case__ : Union[str, Any] ): # Initialize accelerator A = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A = config['lr'] A = int(config['num_epochs'] ) A = int(config['seed'] ) A = int(config['batch_size'] ) A = args.model_name_or_path set_seed(snake_case__ ) A , A = get_dataloaders(snake_case__ , snake_case__ , snake_case__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A = AutoModelForSequenceClassification.from_pretrained(snake_case__ , return_dict=snake_case__ ) # Instantiate optimizer A = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) A = optimizer_cls(params=model.parameters() , lr=snake_case__ ) if accelerator.state.deepspeed_plugin is not None: A = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: A = 1 A = (len(snake_case__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): A = get_linear_schedule_with_warmup( optimizer=snake_case__ , num_warmup_steps=0 , num_training_steps=snake_case__ , ) else: A = DummyScheduler(snake_case__ , total_num_steps=snake_case__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the 
objects in the same order we gave them to the # prepare method. A , A , A , A , A = accelerator.prepare( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # We need to keep track of how many total steps we have iterated over A = 0 # We also need to keep track of the stating epoch so files are named properly A = 0 # Now we train the model A = evaluate.load('glue' , 'mrpc' ) A = 0 A = {} for epoch in range(snake_case__ , snake_case__ ): model.train() for step, batch in enumerate(snake_case__ ): A = model(**snake_case__ ) A = outputs.loss A = loss / gradient_accumulation_steps accelerator.backward(snake_case__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() A = 0 for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): A = model(**snake_case__ ) A = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times A , A = accelerator.gather( (predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(snake_case__ ) - 1: A = predictions[: len(eval_dataloader.dataset ) - samples_seen] A = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=snake_case__ , references=snake_case__ , ) A = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}:' , snake_case__ ) A = eval_metric['accuracy'] if best_performance < eval_metric["accuracy"]: A = eval_metric['accuracy'] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), F'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f: json.dump(snake_case__ , snake_case__ ) def _snake_case ( ): A = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=snake_case__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=snake_case__ , ) parser.add_argument( '--output_dir' , type=snake_case__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--performance_lower_bound' , type=snake_case__ , default=snake_case__ , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , ) parser.add_argument( '--num_epochs' , type=snake_case__ , default=3 , help='Number of train epochs.' , ) A = parser.parse_args() A = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(snake_case__ , snake_case__ ) if __name__ == "__main__": main()
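The script above is meant to run under the accelerate launcher so that a DeepSpeed plugin (and with it the DummyOptim/DummyScheduler branches) is active. A plausible invocation, with illustrative config-file and script names but flags taken verbatim from its argparse:

    accelerate launch --config_file deepspeed_config.yaml this_script.py \
        --model_name_or_path bert-base-cased --num_epochs 3 --output_dir ./results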
700
"""simple docstring""" import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Any ,A_ : Optional[Any] ,A_ : Tuple=13 ,A_ : Optional[Any]=7 ,A_ : Dict=True ,A_ : Optional[Any]=True ,A_ : str=True ,A_ : Union[str, Any]=True ,A_ : Optional[Any]=True ,A_ : Tuple=False ,A_ : Optional[int]=False ,A_ : str=False ,A_ : int=2 ,A_ : Union[str, Any]=99 ,A_ : int=0 ,A_ : Dict=32 ,A_ : List[str]=5 ,A_ : Any=4 ,A_ : str=0.1 ,A_ : Any=0.1 ,A_ : int=512 ,A_ : List[Any]=2 ,A_ : Union[str, Any]=0.02 ,A_ : Optional[Any]=2 ,A_ : List[str]=4 ,A_ : Optional[int]="last" ,A_ : str=True ,A_ : List[str]=None ,A_ : List[Any]=0 ,) -> int: A = parent A = batch_size A = seq_length A = is_training A = use_input_lengths A = use_token_type_ids A = use_labels A = gelu_activation A = sinusoidal_embeddings A = causal A = asm A = n_langs A = vocab_size A = n_special A = hidden_size A = num_hidden_layers A = num_attention_heads A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_sequence_label_size A = initializer_range A = num_labels A = num_choices A = summary_type A = use_proj A = scope A = bos_token_id def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = random_attention_mask([self.batch_size, self.seq_length] ) A = None if self.use_input_lengths: A = ( ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length A = None if self.use_token_type_ids: A = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A = ids_tensor([self.batch_size] ,2 ).float() A = ids_tensor([self.batch_size] ,self.num_choices ) A = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: return XLMConfig( vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : int ,A_ : Dict ,A_ : Optional[Any] ,A_ : Optional[Any] ,A_ : Any 
,A_ : List[str] ,A_ : Optional[int] ,) -> Tuple: A = XLMModel(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,lengths=A_ ,langs=A_ ) A = model(A_ ,langs=A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ,A_ : Union[str, Any] ,A_ : Any ,A_ : Any ,A_ : Any ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : List[str] ,A_ : List[str] ,) -> Union[str, Any]: A = XLMWithLMHeadModel(A_ ) model.to(A_ ) model.eval() A = model(A_ ,token_type_ids=A_ ,labels=A_ ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple ,A_ : Tuple ,A_ : str ,A_ : int ,A_ : str ,A_ : Optional[Any] ,A_ : Any ,A_ : Any ,A_ : Dict ,) -> List[str]: A = XLMForQuestionAnsweringSimple(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model(A_ ,start_positions=A_ ,end_positions=A_ ) A = outputs self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[int] ,A_ : Dict ,A_ : Optional[Any] ,A_ : List[Any] ,A_ : List[str] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : str ,A_ : Any ,) -> Optional[int]: A = XLMForQuestionAnswering(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model( A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,p_mask=A_ ,) A = model( A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,) ((A) , ) = result_with_labels.to_tuple() A = model(A_ ,start_positions=A_ ,end_positions=A_ ) ((A) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape ,() ) self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : Union[str, Any] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : str ,) -> List[Any]: A = XLMForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model(A_ ,labels=A_ ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : str ,A_ : Tuple ,A_ : List[str] ,A_ : Dict ,A_ : Dict ,A_ : Union[str, Any] ,A_ : Dict ,A_ : Any ,) -> Any: A = self.num_labels A = XLMForTokenClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : str ,A_ : int ,A_ : Tuple ,A_ : List[Any] ,A_ : List[str] ,A_ : List[str] ,A_ : Optional[Any] ,A_ : int ,) -> Tuple: A = self.num_choices A = XLMForMultipleChoice(config=A_ ) model.to(A_ ) model.eval() A = 
input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = model( A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) _lowerCamelCase: Dict = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _lowerCamelCase: Optional[Any] = ( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Tuple ,A_ : Any ,A_ : str ) -> Tuple: if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[int] ,A_ : Tuple ,A_ : str=False ) -> Dict: A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": A = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=A_ ) A = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=A_ ) return inputs_dict def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: A = XLMModelTester(self ) A = ConfigTester(self ,config_class=A_ ,emb_dim=37 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : List[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : Union[str, Any] ,A_ : List[Any]=False ,A_ : Tuple=1 ) -> List[Any]: self.assertIsInstance(A_ ,A_ ) self.assertListEqual( [isinstance(A_ ,A_ ) for iter_attentions in attentions] ,[True] * len(A_ ) ) self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(A_ ): # adds PAD dummy token A = min_length + idx + 1 A = min_length + idx + 1 A = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : Dict ,A_ : Optional[Any] ,A_ : Dict ,A_ : Tuple=False ,A_ : Optional[Any]=1 ) -> List[str]: self.assertIsInstance(A_ ,A_ ) self.assertListEqual( [isinstance(A_ ,A_ ) for iter_hidden_states in hidden_states] ,[True] * len(A_ ) ,) self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(A_ ): # adds PAD dummy token A = min_length + idx + 1 A = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(A_ ) ,) pass @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = XLMModel.from_pretrained(A_ ) 
self.assertIsNotNone(A_ ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: A = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' ) model.to(A_ ) A = torch.tensor([[14, 447]] ,dtype=torch.long ,device=A_ ) # the president A = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference A = model.generate(A_ ,do_sample=A_ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,A_ )
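For a quick sanity check outside the test harness, the tester's tiny-config pattern above can be reproduced in a few lines (the config values here are illustrative small sizes, not pretrained defaults):

import torch

from transformers import XLMConfig, XLMModel

config = XLMConfig(vocab_size=99, emb_dim=32, n_layers=2, n_heads=4, max_position_embeddings=512)
model = XLMModel(config).eval()
input_ids = torch.randint(0, 99, (1, 7))
with torch.no_grad():
    out = model(input_ids)
print(out.last_hidden_state.shape)  # torch.Size([1, 7, 32])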
22
0
"""simple docstring""" import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def _snake_case ( ): A = ArgumentParser( description=( 'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes' ) ) # Optional arguments for the launch helper parser.add_argument('--num_cores' , type=snake_case__ , default=1 , help='Number of TPU cores to use (1 or 8).' ) # positional parser.add_argument( 'training_script' , type=snake_case__ , help=( 'The full path to the single TPU training ' 'program/script to be launched in parallel, ' 'followed by all the arguments for the ' 'training script' ) , ) # rest from the training program parser.add_argument('training_script_args' , nargs=snake_case__ ) return parser.parse_args() def _snake_case ( ): A = parse_args() # Import training_script as a module. A = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) A = script_fpath.stem A = importlib.import_module(snake_case__ ) # Patch sys.argv A = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
701
"""simple docstring""" from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput _lowercase = 8 def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int]=BITS ): A = x.device A = (x * 255).int().clamp(0 , 255 ) A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ ) A = rearrange(snake_case__ , 'd -> d 1 1' ) A = rearrange(snake_case__ , 'b c h w -> b c 1 h w' ) A = ((x & mask) != 0).float() A = rearrange(snake_case__ , 'b c d h w -> b (c d) h w' ) A = bits * 2 - 1 return bits def _snake_case ( snake_case__ : Any , snake_case__ : Any=BITS ): A = x.device A = (x > 0).int() A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ , dtype=torch.intaa ) A = rearrange(snake_case__ , 'd -> d 1 1' ) A = rearrange(snake_case__ , 'b (c d) h w -> b c d h w' , d=8 ) A = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' ) return (dec / 255).clamp(0.0 , 1.0 ) def _snake_case ( self : Optional[int] , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : float = 0.0 , snake_case__ : bool = True , snake_case__ : List[str]=None , snake_case__ : bool = True , ): if self.num_inference_steps is None: raise ValueError( 'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) A = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas A = self.alphas_cumprod[timestep] A = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod A = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" A = self.bit_scale if self.config.clip_sample: A = torch.clamp(snake_case__ , -scale , snake_case__ ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) A = self._get_variance(snake_case__ , snake_case__ ) A = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide A = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf A = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf A = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 A = model_output.device if torch.is_tensor(snake_case__ ) else 'cpu' A = torch.randn(model_output.shape , dtype=model_output.dtype , generator=snake_case__ ).to(snake_case__ ) A = self._get_variance(snake_case__ , snake_case__ ) ** 0.5 * eta * noise A = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ ) def _snake_case ( self : Dict , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : Tuple="epsilon" , snake_case__ : List[str]=None , snake_case__ : bool = True , ): A = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: A , A = torch.split(snake_case__ , sample.shape[1] , dim=1 ) else: A = None # 1. compute alphas, betas A = self.alphas_cumprod[t] A = self.alphas_cumprod[t - 1] if t > 0 else self.one A = 1 - alpha_prod_t A = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": A = model_output else: raise ValueError(F'Unsupported prediction_type {prediction_type}.' ) # 3. Clip "predicted x_0" A = self.bit_scale if self.config.clip_sample: A = torch.clamp(snake_case__ , -scale , snake_case__ ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t A = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise A = 0 if t > 0: A = torch.randn( model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=snake_case__ ).to(model_output.device ) A = (self._get_variance(snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise A = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Union[str, Any] ,A_ : UNetaDConditionModel ,A_ : Union[DDIMScheduler, DDPMScheduler] ,A_ : Optional[float] = 1.0 ,) -> Optional[int]: super().__init__() A = bit_scale A = ( ddim_bit_scheduler_step if isinstance(A_ ,A_ ) else ddpm_bit_scheduler_step ) self.register_modules(unet=A_ ,scheduler=A_ ) @torch.no_grad() def __call__( self : Tuple ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 50 ,A_ : Optional[torch.Generator] = None ,A_ : Optional[int] = 1 ,A_ : Optional[str] = "pil" ,A_ : bool = True ,**A_ : Optional[Any] ,) -> Union[Tuple, ImagePipelineOutput]: A = torch.randn( (batch_size, self.unet.config.in_channels, height, width) ,generator=A_ ,) A = decimal_to_bits(A_ ) * self.bit_scale A = latents.to(self.device ) self.scheduler.set_timesteps(A_ ) for t in self.progress_bar(self.scheduler.timesteps ): # predict the noise residual A = self.unet(A_ ,A_ ).sample # compute the previous noisy sample x_t -> x_t-1 A = self.scheduler.step(A_ ,A_ ,A_ ).prev_sample A = bits_to_decimal(A_ ) if output_type == "pil": A = self.numpy_to_pil(A_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=A_ )
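The two helpers at the top of this file implement an 8-bit binary image encoding; a quick round-trip check, assuming they are in scope under the names their call sites use (decimal_to_bits / bits_to_decimal):

import torch

x = torch.rand(2, 3, 16, 16)       # images in [0, 1]
bits = decimal_to_bits(x)          # (2, 24, 16, 16), values in {-1.0, +1.0}
recovered = bits_to_decimal(bits)  # back to [0, 1], quantized to 256 levels
assert torch.allclose(recovered, (x * 255).int().float() / 255)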
22
0
"""simple docstring""" from typing import Tuple, Union from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import is_timm_available, is_torch_available, requires_backends from ...utils.backbone_utils import BackboneMixin from .configuration_timm_backbone import TimmBackboneConfig if is_timm_available(): import timm if is_torch_available(): from torch import Tensor class lowerCAmelCase_ ( _lowercase , _lowercase ): '''simple docstring''' _lowerCamelCase: str = '''pixel_values''' _lowerCamelCase: Any = False _lowerCamelCase: List[Any] = TimmBackboneConfig def __init__( self : str ,A_ : Tuple ,**A_ : Union[str, Any] ) -> Any: requires_backends(self ,'timm' ) super().__init__(A_ ) A = config if config.backbone is None: raise ValueError('backbone is not set in the config. Please set it to a timm model name.' ) if config.backbone not in timm.list_models(): raise ValueError(F'backbone {config.backbone} is not supported by timm.' ) if hasattr(A_ ,'out_features' ) and config.out_features is not None: raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.' ) A = getattr(A_ ,'use_pretrained_backbone' ,A_ ) if pretrained is None: raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.' ) # We just take the final layer by default. This matches the default for the transformers models. A = config.out_indices if getattr(A_ ,'out_indices' ,A_ ) is not None else (-1,) A = timm.create_model( config.backbone ,pretrained=A_ ,features_only=config.features_only ,in_chans=config.num_channels ,out_indices=A_ ,**A_ ,) # These are used to control the output of the model when called. If output_hidden_states is True, then # return_layers is modified to include all layers. 
A = self._backbone.return_layers A = {layer['module']: str(A_ ) for i, layer in enumerate(self._backbone.feature_info.info )} super()._init_backbone(A_ ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,A_ : Union[str, Any] ,*A_ : List[Any] ,**A_ : Optional[Any] ) -> Optional[Any]: requires_backends(cls ,['vision', 'timm'] ) from ...models.timm_backbone import TimmBackboneConfig A = kwargs.pop('config' ,TimmBackboneConfig() ) A = kwargs.pop('use_timm_backbone' ,A_ ) if not use_timm: raise ValueError('use_timm_backbone must be True for timm backbones' ) A = kwargs.pop('num_channels' ,config.num_channels ) A = kwargs.pop('features_only' ,config.features_only ) A = kwargs.pop('use_pretrained_backbone' ,config.use_pretrained_backbone ) A = kwargs.pop('out_indices' ,config.out_indices ) A = TimmBackboneConfig( backbone=A_ ,num_channels=A_ ,features_only=A_ ,use_pretrained_backbone=A_ ,out_indices=A_ ,) return super()._from_config(A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Any ) -> Any: pass def _SCREAMING_SNAKE_CASE ( self : int ,A_ : int ,A_ : Optional[int]=None ,A_ : Any=None ,A_ : List[Any]=None ,**A_ : Optional[int] ) -> Union[BackboneOutput, Tuple[Tensor, ...]]: A = return_dict if return_dict is not None else self.config.use_return_dict A = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A = output_attentions if output_attentions is not None else self.config.output_attentions if output_attentions: raise ValueError('Cannot output attentions for timm backbones at the moment' ) if output_hidden_states: # We modify the return layers to include all the stages of the backbone A = self._all_layers A = self._backbone(A_ ,**A_ ) A = self._return_layers A = tuple(hidden_states[i] for i in self.out_indices ) else: A = self._backbone(A_ ,**A_ ) A = None A = tuple(A_ ) A = tuple(A_ ) if hidden_states is not None else None if not return_dict: A = (feature_maps,) if output_hidden_states: A = output + (hidden_states,) return output return BackboneOutput(feature_maps=A_ ,hidden_states=A_ ,attentions=A_ )
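A hedged usage sketch for the wrapper above, assuming timm is installed and that TimmBackbone / TimmBackboneConfig are exported as in the released library (the backbone name is an illustrative choice):

import torch

from transformers import TimmBackbone, TimmBackboneConfig

config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False, out_indices=(1, 2, 3, 4))
backbone = TimmBackbone(config).eval()
with torch.no_grad():
    outputs = backbone(torch.rand(1, 3, 224, 224))
print([fm.shape for fm in outputs.feature_maps])  # one feature map per out_index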
702
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = '''yolos''' def __init__( self : Dict ,A_ : Optional[Any]=768 ,A_ : int=12 ,A_ : List[str]=12 ,A_ : str=3072 ,A_ : Tuple="gelu" ,A_ : Dict=0.0 ,A_ : List[Any]=0.0 ,A_ : Any=0.02 ,A_ : str=1e-12 ,A_ : List[Any]=[512, 864] ,A_ : Union[str, Any]=16 ,A_ : List[str]=3 ,A_ : Optional[int]=True ,A_ : Tuple=100 ,A_ : str=True ,A_ : Optional[Any]=False ,A_ : Any=1 ,A_ : Optional[Any]=5 ,A_ : Optional[Any]=2 ,A_ : Optional[int]=5 ,A_ : List[Any]=2 ,A_ : Union[str, Any]=0.1 ,**A_ : Tuple ,) -> Any: super().__init__(**A_ ) A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = patch_size A = num_channels A = qkv_bias A = num_detection_tokens A = use_mid_position_embeddings A = auxiliary_loss # Hungarian matcher A = class_cost A = bbox_cost A = giou_cost # Loss coefficients A = bbox_loss_coefficient A = giou_loss_coefficient A = eos_coefficient class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Any = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> float: return 1e-4 @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: return 12
22
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = '''yolos''' def __init__( self : Dict ,A_ : Optional[Any]=768 ,A_ : int=12 ,A_ : List[str]=12 ,A_ : str=3072 ,A_ : Tuple="gelu" ,A_ : Dict=0.0 ,A_ : List[Any]=0.0 ,A_ : Any=0.02 ,A_ : str=1e-12 ,A_ : List[Any]=[512, 864] ,A_ : Union[str, Any]=16 ,A_ : List[str]=3 ,A_ : Optional[int]=True ,A_ : Tuple=100 ,A_ : str=True ,A_ : Optional[Any]=False ,A_ : Any=1 ,A_ : Optional[Any]=5 ,A_ : Optional[Any]=2 ,A_ : Optional[int]=5 ,A_ : List[Any]=2 ,A_ : Union[str, Any]=0.1 ,**A_ : Tuple ,) -> Any: super().__init__(**A_ ) A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = patch_size A = num_channels A = qkv_bias A = num_detection_tokens A = use_mid_position_embeddings A = auxiliary_loss # Hungarian matcher A = class_cost A = bbox_cost A = giou_cost # Loss coefficients A = bbox_loss_coefficient A = giou_loss_coefficient A = eos_coefficient class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Any = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> float: return 1e-4 @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: return 12
703
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Tuple = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=0 ) -> Dict: A = floats_tensor((1, 3, 128, 128) ,rng=random.Random(A_ ) ) A = np.random.RandomState(A_ ) A = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.75, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) A = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=A_ ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) # warmup pass to apply optimizations A = pipe(**self.get_dummy_inputs() ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint 
,provider='CPUExecutionProvider' ) A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str: A = ort.SessionOptions() A = False return options def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) # using the PNDM scheduler by default A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) A = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' ) A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=A_ ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] ) # TODO: lower the tolerance 
after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
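Every test above exercises the same swap pattern: rebuild a scheduler from the pipeline's current scheduler config, then assign it back. In isolation (class name per the released library, where the 2s in Img2Img are digits; the checkpoint is the tiny test model used above):

from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionImg2ImgPipeline

pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
    "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline", provider="CPUExecutionProvider"
)
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)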
22
0
"""simple docstring""" from ...processing_utils import ProcessorMixin class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Dict = '''WhisperFeatureExtractor''' _lowerCamelCase: str = '''WhisperTokenizer''' def __init__( self : Optional[Any] ,A_ : Tuple ,A_ : Any ) -> List[Any]: super().__init__(A_ ,A_ ) A = self.feature_extractor A = False def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[Any]=None ,A_ : Optional[Any]=None ,A_ : List[Any]=True ) -> Optional[int]: return self.tokenizer.get_decoder_prompt_ids(task=A_ ,language=A_ ,no_timestamps=A_ ) def __call__( self : Any ,*A_ : List[Any] ,**A_ : List[str] ) -> Tuple: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*A_ ,**A_ ) A = kwargs.pop('audio' ,A_ ) A = kwargs.pop('sampling_rate' ,A_ ) A = kwargs.pop('text' ,A_ ) if len(A_ ) > 0: A = args[0] A = args[1:] if audio is None and text is None: raise ValueError('You need to specify either an `audio` or `text` input to process.' ) if audio is not None: A = self.feature_extractor(A_ ,*A_ ,sampling_rate=A_ ,**A_ ) if text is not None: A = self.tokenizer(A_ ,**A_ ) if text is None: return inputs elif audio is None: return encodings else: A = encodings['input_ids'] return inputs def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,*A_ : int ,**A_ : str ) -> Optional[int]: return self.tokenizer.batch_decode(*A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,*A_ : str ,**A_ : List[Any] ) -> int: return self.tokenizer.decode(*A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ,A_ : List[str]="np" ) -> List[Any]: return self.tokenizer.get_prompt_ids(A_ ,return_tensors=A_ )
704
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch _lowercase = logging.get_logger(__name__) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Dict = ['''pixel_values'''] def __init__( self : Optional[Any] ,A_ : bool = True ,A_ : Optional[Dict[str, int]] = None ,A_ : PILImageResampling = PILImageResampling.BILINEAR ,A_ : bool = True ,A_ : Dict[str, int] = None ,A_ : bool = True ,A_ : Union[int, float] = 1 / 255 ,A_ : bool = True ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,**A_ : Optional[Any] ,) -> None: super().__init__(**A_ ) A = size if size is not None else {'shortest_edge': 256} A = get_size_dict(A_ ,default_to_square=A_ ) A = crop_size if crop_size is not None else {'height': 224, 'width': 224} A = get_size_dict(A_ ,param_name='crop_size' ) A = do_resize A = size A = resample A = do_center_crop A = crop_size A = do_rescale A = rescale_factor A = do_normalize A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A = image_std if image_std is not None else IMAGENET_STANDARD_STD def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : PILImageResampling = PILImageResampling.BICUBIC ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray: A = get_size_dict(A_ ,default_to_square=A_ ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) A = get_resize_output_image_size(A_ ,size=size['shortest_edge'] ,default_to_square=A_ ) return resize(A_ ,size=A_ ,resample=A_ ,data_format=A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray: A = get_size_dict(A_ ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}' ) return center_crop(A_ ,size=(size['height'], size['width']) ,data_format=A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : float ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : List[str] ) -> np.ndarray: return rescale(A_ ,scale=A_ ,data_format=A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : np.ndarray ,A_ : Union[float, List[float]] ,A_ : Union[float, List[float]] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : Any ,) -> np.ndarray: return normalize(A_ ,mean=A_ ,std=A_ ,data_format=A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : ImageInput ,A_ : Optional[bool] = None ,A_ : Dict[str, int] = None ,A_ : PILImageResampling = None ,A_ : bool = None ,A_ : Dict[str, int] = None ,A_ : Optional[bool] = None ,A_ : Optional[float] = None ,A_ : Optional[bool] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[str, TensorType]] = None ,A_ : Union[str, ChannelDimension] = ChannelDimension.FIRST ,**A_ : Tuple ,) -> List[Any]: A = do_resize if do_resize is not None else self.do_resize A = size if size is not None else self.size A = get_size_dict(A_ ,default_to_square=A_ ) A = resample if resample is not None else self.resample A = do_center_crop if do_center_crop is not None else self.do_center_crop A = crop_size if crop_size is not None else self.crop_size A = get_size_dict(A_ ,param_name='crop_size' ) A = do_rescale if do_rescale is not None else self.do_rescale A = rescale_factor if rescale_factor is not None else self.rescale_factor A = do_normalize if do_normalize is not None else self.do_normalize A = image_mean if image_mean is not None else self.image_mean A = image_std if image_std is not None else self.image_std A = make_list_of_images(A_ ) if not valid_images(A_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
A = [to_numpy_array(A_ ) for image in images] if do_resize: A = [self.resize(image=A_ ,size=A_ ,resample=A_ ) for image in images] if do_center_crop: A = [self.center_crop(image=A_ ,size=A_ ) for image in images] if do_rescale: A = [self.rescale(image=A_ ,scale=A_ ) for image in images] if do_normalize: A = [self.normalize(image=A_ ,mean=A_ ,std=A_ ) for image in images] A = [to_channel_dimension_format(A_ ,A_ ) for image in images] A = {'pixel_values': images} return BatchFeature(data=A_ ,tensor_type=A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : List[Tuple] = None ) -> str: A = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(A_ ) != len(A_ ): raise ValueError( 'Make sure that you pass in as many target sizes as the batch dimension of the logits' ) if is_torch_tensor(A_ ): A = target_sizes.numpy() A = [] for idx in range(len(A_ ) ): A = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='bilinear' ,align_corners=A_ ) A = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(A_ ) else: A = logits.argmax(dim=1 ) A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
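The post-processing method above boils down to bilinear upsampling of the logits followed by a channel-wise argmax; the core step in isolation, with illustrative shapes:

import torch

logits = torch.randn(1, 150, 128, 128)  # (batch, num_labels, h, w)
upsampled = torch.nn.functional.interpolate(
    logits, size=(512, 512), mode="bilinear", align_corners=False
)
seg_map = upsampled[0].argmax(dim=0)  # (512, 512) map of label ids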
22
0
"""simple docstring""" import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any: A = 0 def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: A = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32' ) self.assertIsInstance(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any: with tempfile.TemporaryDirectory() as tmpdirname: A = Path(A_ ) / 'preprocessor_config.json' A = Path(A_ ) / 'config.json' json.dump( {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} ,open(A_ ,'w' ) ,) json.dump({'model_type': 'clip'} ,open(A_ ,'w' ) ) A = AutoImageProcessor.from_pretrained(A_ ) self.assertIsInstance(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: A = Path(A_ ) / 'preprocessor_config.json' A = Path(A_ ) / 'config.json' json.dump( {'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} ,open(A_ ,'w' ) ,) json.dump({'model_type': 'clip'} ,open(A_ ,'w' ) ) A = AutoImageProcessor.from_pretrained(A_ ) self.assertIsInstance(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: with tempfile.TemporaryDirectory() as tmpdirname: A = CLIPConfig() # Create a dummy config file with image_proceesor_type A = Path(A_ ) / 'preprocessor_config.json' A = Path(A_ ) / 'config.json' json.dump( {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} ,open(A_ ,'w' ) ,) json.dump({'model_type': 'clip'} ,open(A_ ,'w' ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally A = AutoImageProcessor.from_pretrained(A_ ).to_dict() config_dict.pop('image_processor_type' ) A = CLIPImageProcessor(**A_ ) # save in new folder model_config.save_pretrained(A_ ) config.save_pretrained(A_ ) A = AutoImageProcessor.from_pretrained(A_ ) # make sure private variable is not incorrectly saved A = json.loads(config.to_json_string() ) self.assertTrue('_processor_class' not in dict_as_saved ) self.assertIsInstance(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: with tempfile.TemporaryDirectory() as tmpdirname: A = Path(A_ ) / 'preprocessor_config.json' json.dump( {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} ,open(A_ ,'w' ) ,) A = AutoImageProcessor.from_pretrained(A_ ) self.assertIsInstance(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]: with self.assertRaisesRegex( A_ ,'clip-base is not a local folder and is not a valid model identifier' ): A = AutoImageProcessor.from_pretrained('clip-base' ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: with self.assertRaisesRegex( A_ ,R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ): A = AutoImageProcessor.from_pretrained(A_ ,revision='aaaaaa' ) def _SCREAMING_SNAKE_CASE ( self 
: Optional[Any] ) -> int: with self.assertRaisesRegex( A_ ,'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' ,): A = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model' ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(A_ ): A = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' ) # If remote code is disabled, we can't load this config. with self.assertRaises(A_ ): A = AutoImageProcessor.from_pretrained( 'hf-internal-testing/test_dynamic_image_processor' ,trust_remote_code=A_ ) A = AutoImageProcessor.from_pretrained( 'hf-internal-testing/test_dynamic_image_processor' ,trust_remote_code=A_ ) self.assertEqual(image_processor.__class__.__name__ ,'NewImageProcessor' ) # Test image processor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(A_ ) A = AutoImageProcessor.from_pretrained(A_ ,trust_remote_code=A_ ) self.assertEqual(reloaded_image_processor.__class__.__name__ ,'NewImageProcessor' ) def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: try: AutoConfig.register('custom' ,A_ ) AutoImageProcessor.register(A_ ,A_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(A_ ): AutoImageProcessor.register(A_ ,A_ ) with tempfile.TemporaryDirectory() as tmpdirname: A = Path(A_ ) / 'preprocessor_config.json' A = Path(A_ ) / 'config.json' json.dump( {'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} ,open(A_ ,'w' ) ,) json.dump({'model_type': 'clip'} ,open(A_ ,'w' ) ) A = CustomImageProcessor.from_pretrained(A_ ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(A_ ) A = AutoImageProcessor.from_pretrained(A_ ) self.assertIsInstance(A_ ,A_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = True try: AutoConfig.register('custom' ,A_ ) AutoImageProcessor.register(A_ ,A_ ) # If remote code is not set, the default is to use local A = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' ) self.assertEqual(image_processor.__class__.__name__ ,'NewImageProcessor' ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. A = AutoImageProcessor.from_pretrained( 'hf-internal-testing/test_dynamic_image_processor' ,trust_remote_code=A_ ) self.assertEqual(image_processor.__class__.__name__ ,'NewImageProcessor' ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub A = AutoImageProcessor.from_pretrained( 'hf-internal-testing/test_dynamic_image_processor' ,trust_remote_code=A_ ) self.assertEqual(image_processor.__class__.__name__ ,'NewImageProcessor' ) self.assertTrue(not hasattr(A_ ,'is_local' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
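The registration dance those tests perform can be done in isolation; CustomConfig and CustomImageProcessor below are stand-ins for your own subclasses, and re-registering the same model type within one process raises, exactly as the test asserts:

from transformers import AutoConfig, AutoImageProcessor, PretrainedConfig
from transformers.image_processing_utils import BaseImageProcessor


class CustomConfig(PretrainedConfig):
    model_type = "custom"


class CustomImageProcessor(BaseImageProcessor):
    pass


AutoConfig.register("custom", CustomConfig)
AutoImageProcessor.register(CustomConfig, CustomImageProcessor)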
705
"""simple docstring""" import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 _lowercase = data_utils.TransfoXLTokenizer _lowercase = data_utils.TransfoXLCorpus _lowercase = data_utils _lowercase = data_utils def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : int ): if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(snake_case__ , 'rb' ) as fp: A = pickle.load(snake_case__ , encoding='latin1' ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) A = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file'] print(F'Save vocabulary to {pytorch_vocab_dump_path}' ) A = corpus.vocab.__dict__ torch.save(snake_case__ , snake_case__ ) A = corpus.__dict__ corpus_dict_no_vocab.pop('vocab' , snake_case__ ) A = pytorch_dump_folder_path + '/' + CORPUS_NAME print(F'Save dataset to {pytorch_dataset_dump_path}' ) torch.save(snake_case__ , snake_case__ ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model A = os.path.abspath(snake_case__ ) A = os.path.abspath(snake_case__ ) print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' ) # Initialise PyTorch model if transfo_xl_config_file == "": A = TransfoXLConfig() else: A = TransfoXLConfig.from_json_file(snake_case__ ) print(F'Building PyTorch model from configuration: {config}' ) A = TransfoXLLMHeadModel(snake_case__ ) A = load_tf_weights_in_transfo_xl(snake_case__ , snake_case__ , snake_case__ ) # Save pytorch-model A = os.path.join(snake_case__ , snake_case__ ) A = os.path.join(snake_case__ , snake_case__ ) print(F'Save PyTorch model to {os.path.abspath(snake_case__ )}' ) torch.save(model.state_dict() , snake_case__ ) print(F'Save configuration file to {os.path.abspath(snake_case__ )}' ) with open(snake_case__ , 'w' , encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the folder to store the PyTorch model or dataset/vocab.''', ) parser.add_argument( '''--tf_checkpoint_path''', default='''''', type=str, help='''An optional path to a TensorFlow checkpoint path to be converted.''', ) parser.add_argument( '''--transfo_xl_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained BERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--transfo_xl_dataset_file''', default='''''', type=str, help='''An optional dataset file to be converted in a vocabulary.''', ) _lowercase = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
22
0
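The registration test in the row above leans on two auto-class hooks. A minimal sketch of that flow, assuming a transformers release where AutoImageProcessor.register accepts the config class and processor class positionally; CustomConfig and CustomImageProcessor are illustrative stand-ins for the test fixtures, not library classes:

from transformers import AutoConfig, AutoImageProcessor, PretrainedConfig
from transformers.image_processing_utils import BaseImageProcessor

class CustomConfig(PretrainedConfig):
    model_type = "custom"  # key the auto classes use for lookup

class CustomImageProcessor(BaseImageProcessor):
    pass

# Wire the new config type into the auto classes; from_pretrained can then
# resolve a checkpoint whose config reports model_type == "custom".
AutoConfig.register("custom", CustomConfig)
AutoImageProcessor.register(CustomConfig, CustomImageProcessor)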
"""simple docstring""" def _snake_case ( snake_case__ : list[int] , snake_case__ : list[int] ): A = len(snake_case__ ) print('The following activities are selected:' ) # The first activity is always selected A = 0 print(snake_case__ , end=',' ) # Consider rest of the activities for j in range(snake_case__ ): # If this activity has start time greater than # or equal to the finish time of previously # selected activity, then select it if start[j] >= finish[i]: print(snake_case__ , end=',' ) A = j if __name__ == "__main__": import doctest doctest.testmod() _lowercase = [1, 3, 0, 5, 8, 5] _lowercase = [2, 4, 6, 7, 9, 9] print_max_activities(start, finish)
706
"""simple docstring""" from collections import deque from math import floor from random import random from time import time class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Dict ) -> int: A = {} def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : Optional[Any]=1 ) -> int: if self.graph.get(A_ ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: A = [[w, v]] if not self.graph.get(A_ ): A = [] def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: return list(self.graph ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Dict ) -> Optional[Any]: if self.graph.get(A_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int=-2 ,A_ : Dict=-1 ) -> str: if s == d: return [] A = [] A = [] if s == -2: A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(A_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = ss # check if se have reached the starting point if len(A_ ) == 0: return visited def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Any=-1 ) -> int: if c == -1: A = floor(random() * 1_0000 ) + 10 for i in range(A_ ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): A = floor(random() * c ) + 1 if n != i: self.add_pair(A_ ,A_ ,1 ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Union[str, Any]=-2 ) -> Optional[Any]: A = deque() A = [] if s == -2: A = list(self.graph )[0] d.append(A_ ) visited.append(A_ ) while d: A = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple ) -> Any: A = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Union[str, Any] ) -> str: return len(self.graph[u] ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any]=-2 ) -> Any: A = [] A = [] if s == -2: A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = s A = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = ss # check if se have reached the starting point if len(A_ ) == 0: return sorted_nodes def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = [] A = [] A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = -2 A = [] A = s A = False A = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A = len(A_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 
1: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() A = True if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = False indirect_parents.append(A_ ) A = s A = ss # check if se have reached the starting point if len(A_ ) == 0: return list(A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: A = [] A = [] A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = -2 A = [] A = s A = False A = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A = len(A_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() A = True if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = False indirect_parents.append(A_ ) A = s A = ss # check if se have reached the starting point if len(A_ ) == 0: return False def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple=-2 ,A_ : List[str]=-1 ) -> str: A = time() self.dfs(A_ ,A_ ) A = time() return end - begin def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any]=-2 ) -> Dict: A = time() self.bfs(A_ ) A = time() return end - begin class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] ) -> Tuple: A = {} def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[str]=1 ) -> Dict: # check if the u exists if self.graph.get(A_ ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist A = [[w, v]] # add the other way if self.graph.get(A_ ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist A = [[w, u]] def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : List[str] ) -> List[Any]: if self.graph.get(A_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(A_ ) # the other way round if self.graph.get(A_ ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=-2 ,A_ : List[Any]=-1 ) -> int: if s == d: return [] A = [] A = [] if s == -2: A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(A_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = ss # check if se have reached the starting point if len(A_ ) == 0: return visited def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int]=-1 ) -> List[Any]: if c == -1: A = floor(random() * 1_0000 ) + 10 for i in range(A_ ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): A = floor(random() * c ) + 1 if n != i: self.add_pair(A_ ,A_ ,1 ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict=-2 ) -> List[Any]: A = deque() A = [] if s == -2: A = list(self.graph )[0] d.append(A_ ) visited.append(A_ ) while d: A = d.popleft() if 
len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[Any] ) -> List[Any]: return len(self.graph[u] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: A = [] A = [] A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = -2 A = [] A = s A = False A = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A = len(A_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() A = True if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = False indirect_parents.append(A_ ) A = s A = ss # check if se have reached the starting point if len(A_ ) == 0: return list(A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: A = [] A = [] A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = -2 A = [] A = s A = False A = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A = len(A_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() A = True if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = False indirect_parents.append(A_ ) A = s A = ss # check if se have reached the starting point if len(A_ ) == 0: return False def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: return list(self.graph ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any]=-2 ,A_ : List[str]=-1 ) -> Any: A = time() self.dfs(A_ ,A_ ) A = time() return end - begin def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any]=-2 ) -> Union[str, Any]: A = time() self.bfs(A_ ) A = time() return end - begin
22
0
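The first snippet in the row above is the classic greedy activity-selection routine; since the masked names hide its bookkeeping, here is the same algorithm restated under the assumed name print_max_activities (matching the call at the bottom of that snippet), exercised with the row's own sample data:

def print_max_activities(start: list[int], finish: list[int]) -> None:
    # Assumes activities are already sorted by finish time.
    n = len(finish)
    i = 0
    print("The following activities are selected:")
    print(i, end=",")
    for j in range(n):
        # Greedy step: take the next activity whose start time is at or
        # after the finish time of the last selected activity.
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j

print_max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9])  # prints 0,1,3,4,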
"""simple docstring""" import torch from transformers import CamembertForMaskedLM, CamembertTokenizer def _snake_case ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Any=5 ): # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py assert masked_input.count('<mask>' ) == 1 A = torch.tensor(tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) ).unsqueeze(0 ) # Batch size 1 A = model(snake_case__ )[0] # The last hidden-state is the first element of the output tuple A = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item() A = logits[0, masked_index, :] A = logits.softmax(dim=0 ) A , A = prob.topk(k=snake_case__ , dim=0 ) A = ' '.join( [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(snake_case__ ) )] ) A = tokenizer.mask_token A = [] for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ' ) ): A = predicted_token_bpe.replace('\u2581' , ' ' ) if " {0}".format(snake_case__ ) in masked_input: topk_filled_outputs.append( ( masked_input.replace(' {0}'.format(snake_case__ ) , snake_case__ ), values[index].item(), predicted_token, ) ) else: topk_filled_outputs.append( ( masked_input.replace(snake_case__ , snake_case__ ), values[index].item(), predicted_token, ) ) return topk_filled_outputs _lowercase = CamembertTokenizer.from_pretrained('''camembert-base''') _lowercase = CamembertForMaskedLM.from_pretrained('''camembert-base''') model.eval() _lowercase = '''Le camembert est <mask> :)''' print(fill_mask(masked_input, model, tokenizer, topk=3))
707
"""simple docstring""" from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def _snake_case ( snake_case__ : str = "isbn/0140328726" ): A = olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes if new_olid.count('/' ) != 1: A = F'{olid} is not a valid Open Library olid' raise ValueError(snake_case__ ) return requests.get(F'https://openlibrary.org/{new_olid}.json' ).json() def _snake_case ( snake_case__ : dict ): A = { 'title': 'Title', 'publish_date': 'Publish date', 'authors': 'Authors', 'number_of_pages': 'Number of pages:', 'first_sentence': 'First sentence', 'isbn_10': 'ISBN (10)', 'isbn_13': 'ISBN (13)', } A = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} A = [ get_openlibrary_data(author['key'] )['name'] for author in data['Authors'] ] A = data['First sentence']['value'] for key, value in data.items(): if isinstance(snake_case__ , snake_case__ ): A = ', '.join(snake_case__ ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: _lowercase = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""") continue print(F"""\nSearching Open Library for ISBN: {isbn}...\n""") try: _lowercase = summarize_book(get_openlibrary_data(F"""isbn/{isbn}""")) print('''\n'''.join(F"""{key}: {value}""" for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(F"""Sorry, there are no results for ISBN: {isbn}.""")
22
0
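The CamemBERT snippet above decodes the top-k mask fills by hand; the same result can be cross-checked with the high-level pipeline API. A sketch, assuming network access to the camembert-base checkpoint on the Hub:

from transformers import pipeline

fill = pipeline("fill-mask", model="camembert-base", top_k=3)
for pred in fill("Le camembert est <mask> :)"):
    # Each prediction carries the completed sentence and its softmax score.
    print(pred["sequence"], round(pred["score"], 3))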
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def _snake_case ( snake_case__ : int ): A = [ 'encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', '_float_tensor', 'decoder.output_projection.weight', ] for k in ignore_keys: state_dict.pop(snake_case__ , snake_case__ ) def _snake_case ( snake_case__ : Any ): A , A = emb.weight.shape A = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ ) A = emb.weight.data return lin_layer def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : int="facebook/mbart-large-en-ro" , snake_case__ : Any=False , snake_case__ : int=False ): A = torch.load(snake_case__ , map_location='cpu' )['model'] remove_ignore_keys_(snake_case__ ) A = state_dict['encoder.embed_tokens.weight'].shape[0] A = MBartConfig.from_pretrained(snake_case__ , vocab_size=snake_case__ ) if mbart_aa and finetuned: A = 'relu' A = state_dict['decoder.embed_tokens.weight'] A = MBartForConditionalGeneration(snake_case__ ) model.model.load_state_dict(snake_case__ ) if finetuned: A = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') _lowercase = parser.parse_args() _lowercase = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
708
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) _lowercase = { '''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''], '''tokenization_perceiver''': ['''PerceiverTokenizer'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''PerceiverFeatureExtractor'''] _lowercase = ['''PerceiverImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PerceiverForImageClassificationConvProcessing''', '''PerceiverForImageClassificationFourier''', '''PerceiverForImageClassificationLearned''', '''PerceiverForMaskedLM''', '''PerceiverForMultimodalAutoencoding''', '''PerceiverForOpticalFlow''', '''PerceiverForSequenceClassification''', '''PerceiverLayer''', '''PerceiverModel''', '''PerceiverPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
0
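The mBART converter's make_linear_from_emb helper implements the usual embedding / LM-head weight tying. A self-contained sketch of that trick with toy dimensions:

import torch
from torch import nn

emb = nn.Embedding(10, 4)                       # (vocab_size, hidden) lookup table
vocab_size, hidden = emb.weight.shape
lm_head = nn.Linear(hidden, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data           # alias the embedding weights
assert torch.equal(lm_head.weight, emb.weight)  # output projection is now tied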
def _snake_case ( snake_case__ : int = 200_0000 ): A = [0 for i in range(n + 1 )] A = 1 A = 1 for i in range(2 , int(n**0.5 ) + 1 ): if primality_list[i] == 0: for j in range(i * i , n + 1 , i ): A = 1 A = 0 for i in range(snake_case__ ): if primality_list[i] == 0: sum_of_primes += i return sum_of_primes if __name__ == "__main__": print(F"""{solution() = }""")
709
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def _snake_case ( snake_case__ : int ): A = SwinvaConfig() A = swinva_name.split('_' ) A = name_split[1] if "to" in name_split[3]: A = int(name_split[3][-3:] ) else: A = int(name_split[3] ) if "to" in name_split[2]: A = int(name_split[2][-2:] ) else: A = int(name_split[2][6:] ) if model_size == "tiny": A = 96 A = (2, 2, 6, 2) A = (3, 6, 12, 24) elif model_size == "small": A = 96 A = (2, 2, 18, 2) A = (3, 6, 12, 24) elif model_size == "base": A = 128 A = (2, 2, 18, 2) A = (4, 8, 16, 32) else: A = 192 A = (2, 2, 18, 2) A = (6, 12, 24, 48) if "to" in swinva_name: A = (12, 12, 12, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): A = 2_1841 A = 'huggingface/label-files' A = 'imagenet-22k-id2label.json' A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) ) A = {int(snake_case__ ): v for k, v in idalabel.items()} A = idalabel A = {v: k for k, v in idalabel.items()} else: A = 1000 A = 'huggingface/label-files' A = 'imagenet-1k-id2label.json' A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) ) A = {int(snake_case__ ): v for k, v in idalabel.items()} A = idalabel A = {v: k for k, v in idalabel.items()} A = img_size A = num_classes A = embed_dim A = depths A = num_heads A = window_size return config def _snake_case ( snake_case__ : List[Any] ): if "patch_embed.proj" in name: A = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: A = name.replace('patch_embed.norm' , 'embeddings.norm' ) if "layers" in name: A = 'encoder.' + name if "attn.proj" in name: A = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: A = name.replace('attn' , 'attention.self' ) if "norm1" in name: A = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: A = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: A = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: A = name.replace('mlp.fc2' , 'output.dense' ) if "q_bias" in name: A = name.replace('q_bias' , 'query.bias' ) if "k_bias" in name: A = name.replace('k_bias' , 'key.bias' ) if "v_bias" in name: A = name.replace('v_bias' , 'value.bias' ) if "cpb_mlp" in name: A = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' ) if name == "norm.weight": A = 'layernorm.weight' if name == "norm.bias": A = 'layernorm.bias' if "head" in name: A = name.replace('head' , 'classifier' ) else: A = 'swinv2.' + name return name def _snake_case ( snake_case__ : List[Any] , snake_case__ : List[Any] ): for key in orig_state_dict.copy().keys(): A = orig_state_dict.pop(snake_case__ ) if "mask" in key: continue elif "qkv" in key: A = key.split('.' 
) A = int(key_split[1] ) A = int(key_split[3] ) A = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: A = val[:dim, :] A = val[dim : dim * 2, :] A = val[-dim:, :] else: A = val[:dim] A = val[ dim : dim * 2 ] A = val[-dim:] else: A = val return orig_state_dict def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Tuple ): A = timm.create_model(snake_case__ , pretrained=snake_case__ ) timm_model.eval() A = get_swinva_config(snake_case__ ) A = SwinvaForImageClassification(snake_case__ ) model.eval() A = convert_state_dict(timm_model.state_dict() , snake_case__ ) model.load_state_dict(snake_case__ ) A = 'http://images.cocodataset.org/val2017/000000039769.jpg' A = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swinva_name.replace('_' , '-' ) ) ) A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) A = image_processor(images=snake_case__ , return_tensors='pt' ) A = timm_model(inputs['pixel_values'] ) A = model(**snake_case__ ).logits assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) print(F'Saving model {swinva_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case__ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(snake_case__ ) model.push_to_hub( repo_path_or_name=Path(snake_case__ , snake_case__ ) , organization='nandwalritik' , commit_message='Add model' , ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swinv2_name''', default='''swinv2_tiny_patch4_window8_256''', type=str, help='''Name of the Swinv2 timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) _lowercase = parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
22
0
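The Project Euler 10 row above sums primes with a sieve, but the masked assignment targets obscure it; an unmasked restatement (solution is the name its __main__ block calls), with a small sanity check:

def solution(n: int = 2_000_000) -> int:
    # primality_list[k] == 0 means k is still considered prime.
    primality_list = [0] * (n + 1)
    primality_list[0] = primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):  # strike out multiples of i
                primality_list[j] = 1
    return sum(i for i in range(n) if primality_list[i] == 0)

assert solution(10) == 17  # 2 + 3 + 5 + 7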
"""simple docstring""" import math def _snake_case ( snake_case__ : int ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _snake_case ( snake_case__ : int = 1_0001 ): try: A = int(snake_case__ ) except (TypeError, ValueError): raise TypeError('Parameter nth must be int or castable to int.' ) from None if nth <= 0: raise ValueError('Parameter nth must be greater than or equal to one.' ) A = [] A = 2 while len(snake_case__ ) < nth: if is_prime(snake_case__ ): primes.append(snake_case__ ) num += 1 else: num += 1 return primes[len(snake_case__ ) - 1] if __name__ == "__main__": print(F"""{solution() = }""")
710
"""simple docstring""" from math import pi, sqrt def _snake_case ( snake_case__ : float ): if num <= 0: raise ValueError('math domain error' ) if num > 171.5: raise OverflowError('math range error' ) elif num - int(snake_case__ ) not in (0, 0.5): raise NotImplementedError('num must be an integer or a half-integer' ) elif num == 0.5: return sqrt(snake_case__ ) else: return 1.0 if num == 1 else (num - 1) * gamma(num - 1 ) def _snake_case ( ): assert gamma(0.5 ) == sqrt(snake_case__ ) assert gamma(1 ) == 1.0 assert gamma(2 ) == 1.0 if __name__ == "__main__": from doctest import testmod testmod() _lowercase = 1.0 while num: _lowercase = float(input('''Gamma of: ''')) print(F"""gamma({num}) = {gamma(num)}""") print('''\nEnter 0 to exit...''')
22
0
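The half-integer gamma row relies on the base case gamma(1/2) = sqrt(pi) and the recurrence gamma(x) = (x - 1) * gamma(x - 1); restated under the unmasked name gamma that its __main__ block uses, with two quick checks:

from math import isclose, pi, sqrt

def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    if num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    if num == 0.5:
        return sqrt(pi)  # base case: gamma(1/2) = sqrt(pi)
    return 1.0 if num == 1 else (num - 1) * gamma(num - 1)

assert gamma(5) == 24.0                   # gamma(n) = (n - 1)! for integers
assert isclose(gamma(1.5), sqrt(pi) / 2)  # gamma(3/2) = sqrt(pi) / 2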
"""simple docstring""" import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse('''0.8.3'''): raise Exception('''requires gluonnlp == 0.8.3''') if version.parse(mx.__version__) != version.parse('''1.5.0'''): raise Exception('''requires mxnet == 1.5.0''') logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) _lowercase = '''The Nymphenburg Palace is a beautiful palace in Munich!''' def _snake_case ( snake_case__ : str , snake_case__ : str ): A = { 'attention_cell': 'multi_head', 'num_layers': 4, 'units': 1024, 'hidden_size': 768, 'max_length': 512, 'num_heads': 8, 'scaled': True, 'dropout': 0.1, 'use_residual': True, 'embed_size': 1024, 'embed_dropout': 0.1, 'word_embed': None, 'layer_norm_eps': 1e-5, 'token_type_vocab_size': 2, } A = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py A = BERTEncoder( attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=snake_case__ , output_all_encodings=snake_case__ , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , snake_case__ ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later A = 'openwebtext_ccnews_stories_books_cased' # Specify download folder to Gluonnlp's vocab A = os.path.join(get_home_dir() , 'models' ) A = _load_vocab(snake_case__ , snake_case__ , snake_case__ , cls=snake_case__ ) A = nlp.model.BERTModel( snake_case__ , len(snake_case__ ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=snake_case__ , use_token_type_embed=snake_case__ , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=snake_case__ , use_decoder=snake_case__ , ) original_bort.load_parameters(snake_case__ , cast_dtype=snake_case__ , ignore_extra=snake_case__ ) A = original_bort._collect_params_with_prefix() # Build our config 🤗 A = { 'architectures': ['BertForMaskedLM'], 'attention_probs_dropout_prob': predefined_args['dropout'], 'hidden_act': 'gelu', 'hidden_dropout_prob': predefined_args['dropout'], 'hidden_size': predefined_args['embed_size'], 'initializer_range': 0.02, 'intermediate_size': predefined_args['hidden_size'], 'layer_norm_eps': predefined_args['layer_norm_eps'], 'max_position_embeddings': predefined_args['max_length'], 'model_type': 'bort', 'num_attention_heads': predefined_args['num_heads'], 'num_hidden_layers': predefined_args['num_layers'], 'pad_token_id': 
1, # 2 = BERT, 1 = RoBERTa 'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa 'vocab_size': len(snake_case__ ), } A = BertConfig.from_dict(snake_case__ ) A = BertForMaskedLM(snake_case__ ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(snake_case__ : Any ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(snake_case__ : Tuple , snake_case__ : Union[str, Any] ): A = hf_param.shape A = to_torch(params[gluon_param] ) A = gluon_param.shape assert ( shape_hf == shape_gluon ), F'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers' return gluon_param A = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' ) A = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' ) A = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' ) A = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) A = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i 
in range(hf_bort_config.num_hidden_layers ): A = hf_bort_model.bert.encoder.layer[i] # self attention A = layer.attention.self A = check_and_map_params( self_attn.key.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' ) A = check_and_map_params( self_attn.key.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' ) A = check_and_map_params( self_attn.query.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' ) A = check_and_map_params( self_attn.query.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' ) A = check_and_map_params( self_attn.value.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' ) A = check_and_map_params( self_attn.value.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' ) # self attention output A = layer.attention.output A = check_and_map_params( self_output.dense.bias , F'encoder.transformer_cells.{i}.proj.bias' ) A = check_and_map_params( self_output.dense.weight , F'encoder.transformer_cells.{i}.proj.weight' ) A = check_and_map_params( self_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.layer_norm.beta' ) A = check_and_map_params( self_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.layer_norm.gamma' ) # intermediate A = layer.intermediate A = check_and_map_params( intermediate.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_1.bias' ) A = check_and_map_params( intermediate.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_1.weight' ) # output A = layer.output A = check_and_map_params( bert_output.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_2.bias' ) A = check_and_map_params( bert_output.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_2.weight' ) A = check_and_map_params( bert_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.ffn.layer_norm.beta' ) A = check_and_map_params( bert_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models A = RobertaTokenizer.from_pretrained('roberta-base' ) A = tokenizer.encode_plus(snake_case__ )['input_ids'] # Get gluon output A = mx.nd.array([input_ids] ) A = original_bort(inputs=snake_case__ , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(snake_case__ ) A = BertModel.from_pretrained(snake_case__ ) hf_bort_model.eval() A = tokenizer.encode_plus(snake_case__ , return_tensors='pt' ) A = hf_bort_model(**snake_case__ )[0] A = output_gluon[0].asnumpy() A = output_hf[0].detach().numpy() A = np.max(np.abs(hf_layer - gluon_layer ) ).item() A = np.allclose(snake_case__ , snake_case__ , atol=1e-3 ) if success: print('✔️ Both model do output the same tensors' ) else: print('❌ Both model do **NOT** output the same tensors' ) print('Absolute difference is:' , snake_case__ ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) _lowercase = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
711
"""simple docstring""" from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: torch.FloatTensor class lowerCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : List[str] ,A_ : Dict=3 ,A_ : int=3 ,A_ : str=("DownEncoderBlock2D",) ,A_ : Dict=(64,) ,A_ : str=2 ,A_ : Union[str, Any]=32 ,A_ : Optional[int]="silu" ,A_ : str=True ,) -> Union[str, Any]: super().__init__() A = layers_per_block A = torch.nn.Convad( A_ ,block_out_channels[0] ,kernel_size=3 ,stride=1 ,padding=1 ,) A = None A = nn.ModuleList([] ) # down A = block_out_channels[0] for i, down_block_type in enumerate(A_ ): A = output_channel A = block_out_channels[i] A = i == len(A_ ) - 1 A = get_down_block( A_ ,num_layers=self.layers_per_block ,in_channels=A_ ,out_channels=A_ ,add_downsample=not is_final_block ,resnet_eps=1e-6 ,downsample_padding=0 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,) self.down_blocks.append(A_ ) # mid A = UNetMidBlockaD( in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,) # out A = nn.GroupNorm(num_channels=block_out_channels[-1] ,num_groups=A_ ,eps=1e-6 ) A = nn.SiLU() A = 2 * out_channels if double_z else out_channels A = nn.Convad(block_out_channels[-1] ,A_ ,3 ,padding=1 ) A = False def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]: A = x A = self.conv_in(A_ ) if self.training and self.gradient_checkpointing: def create_custom_forward(A_ : Dict ): def custom_forward(*A_ : Tuple ): return module(*A_ ) return custom_forward # down if is_torch_version('>=' ,'1.11.0' ): for down_block in self.down_blocks: A = torch.utils.checkpoint.checkpoint( create_custom_forward(A_ ) ,A_ ,use_reentrant=A_ ) # middle A = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) ,A_ ,use_reentrant=A_ ) else: for down_block in self.down_blocks: A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ ) # middle A = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) ,A_ ) else: # down for down_block in self.down_blocks: A = down_block(A_ ) # middle A = self.mid_block(A_ ) # post-process A = self.conv_norm_out(A_ ) A = self.conv_act(A_ ) A = self.conv_out(A_ ) return sample class lowerCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : List[Any] ,A_ : Optional[Any]=3 ,A_ : Optional[int]=3 ,A_ : str=("UpDecoderBlock2D",) ,A_ : Any=(64,) ,A_ : Optional[int]=2 ,A_ : Optional[int]=32 ,A_ : Tuple="silu" ,A_ : Optional[int]="group" ,) -> Any: super().__init__() A = layers_per_block A = nn.Convad( A_ ,block_out_channels[-1] ,kernel_size=3 ,stride=1 ,padding=1 ,) A = None A = nn.ModuleList([] ) A = in_channels if norm_type == 'spatial' else None # mid A = UNetMidBlockaD( in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' if norm_type == 'group' else norm_type ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,) # up A = list(reversed(A_ ) ) A = reversed_block_out_channels[0] for i, up_block_type 
in enumerate(A_ ): A = output_channel A = reversed_block_out_channels[i] A = i == len(A_ ) - 1 A = get_up_block( A_ ,num_layers=self.layers_per_block + 1 ,in_channels=A_ ,out_channels=A_ ,prev_output_channel=A_ ,add_upsample=not is_final_block ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,resnet_time_scale_shift=A_ ,) self.up_blocks.append(A_ ) A = output_channel # out if norm_type == "spatial": A = SpatialNorm(block_out_channels[0] ,A_ ) else: A = nn.GroupNorm(num_channels=block_out_channels[0] ,num_groups=A_ ,eps=1e-6 ) A = nn.SiLU() A = nn.Convad(block_out_channels[0] ,A_ ,3 ,padding=1 ) A = False def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Union[str, Any]=None ) -> Any: A = z A = self.conv_in(A_ ) A = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(A_ : List[Any] ): def custom_forward(*A_ : Tuple ): return module(*A_ ) return custom_forward if is_torch_version('>=' ,'1.11.0' ): # middle A = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) ,A_ ,A_ ,use_reentrant=A_ ) A = sample.to(A_ ) # up for up_block in self.up_blocks: A = torch.utils.checkpoint.checkpoint( create_custom_forward(A_ ) ,A_ ,A_ ,use_reentrant=A_ ) else: # middle A = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) ,A_ ,A_ ) A = sample.to(A_ ) # up for up_block in self.up_blocks: A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ ,A_ ) else: # middle A = self.mid_block(A_ ,A_ ) A = sample.to(A_ ) # up for up_block in self.up_blocks: A = up_block(A_ ,A_ ) # post-process if latent_embeds is None: A = self.conv_norm_out(A_ ) else: A = self.conv_norm_out(A_ ,A_ ) A = self.conv_act(A_ ) A = self.conv_out(A_ ) return sample class lowerCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[int] ,A_ : Optional[int] ,A_ : Any ,A_ : str ,A_ : Dict=None ,A_ : List[Any]="random" ,A_ : Optional[int]=False ,A_ : str=True ) -> List[str]: super().__init__() A = n_e A = vq_embed_dim A = beta A = legacy A = nn.Embedding(self.n_e ,self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e ,1.0 / self.n_e ) A = remap if self.remap is not None: self.register_buffer('used' ,torch.tensor(np.load(self.remap ) ) ) A = self.used.shape[0] A = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": A = self.re_embed A = self.re_embed + 1 print( F'Remapping {self.n_e} indices to {self.re_embed} indices. ' F'Using {self.unknown_index} for unknown indices.' 
) else: A = n_e A = sane_index_shape def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ) -> Any: A = inds.shape assert len(A_ ) > 1 A = inds.reshape(ishape[0] ,-1 ) A = self.used.to(A_ ) A = (inds[:, :, None] == used[None, None, ...]).long() A = match.argmax(-1 ) A = match.sum(2 ) < 1 if self.unknown_index == "random": A = torch.randint(0 ,self.re_embed ,size=new[unknown].shape ).to(device=new.device ) else: A = self.unknown_index return new.reshape(A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ) -> List[Any]: A = inds.shape assert len(A_ ) > 1 A = inds.reshape(ishape[0] ,-1 ) A = self.used.to(A_ ) if self.re_embed > self.used.shape[0]: # extra token A = 0 # simply set to zero A = torch.gather(used[None, :][inds.shape[0] * [0], :] ,1 ,A_ ) return back.reshape(A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[Any] ) -> str: # reshape z -> (batch, height, width, channel) and flatten A = z.permute(0 ,2 ,3 ,1 ).contiguous() A = z.view(-1 ,self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z A = torch.argmin(torch.cdist(A_ ,self.embedding.weight ) ,dim=1 ) A = self.embedding(A_ ).view(z.shape ) A = None A = None # compute loss for embedding if not self.legacy: A = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: A = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients A = z + (z_q - z).detach() # reshape back to match original input shape A = z_q.permute(0 ,3 ,1 ,2 ).contiguous() if self.remap is not None: A = min_encoding_indices.reshape(z.shape[0] ,-1 ) # add batch axis A = self.remap_to_used(A_ ) A = min_encoding_indices.reshape(-1 ,1 ) # flatten if self.sane_index_shape: A = min_encoding_indices.reshape(z_q.shape[0] ,z_q.shape[2] ,z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : str ) -> Union[str, Any]: # shape specifying (batch, height, width, channel) if self.remap is not None: A = indices.reshape(shape[0] ,-1 ) # add batch axis A = self.unmap_to_all(A_ ) A = indices.reshape(-1 ) # flatten again # get quantized latent vectors A = self.embedding(A_ ) if shape is not None: A = z_q.view(A_ ) # reshape back to match original input shape A = z_q.permute(0 ,3 ,1 ,2 ).contiguous() return z_q class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : str ,A_ : Tuple ,A_ : Dict=False ) -> List[str]: A = parameters A , A = torch.chunk(A_ ,2 ,dim=1 ) A = torch.clamp(self.logvar ,-30.0 ,20.0 ) A = deterministic A = torch.exp(0.5 * self.logvar ) A = torch.exp(self.logvar ) if self.deterministic: A = A = torch.zeros_like( self.mean ,device=self.parameters.device ,dtype=self.parameters.dtype ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[torch.Generator] = None ) -> torch.FloatTensor: # make sure sample is on the same device as the parameters and has same dtype A = randn_tensor( self.mean.shape ,generator=A_ ,device=self.parameters.device ,dtype=self.parameters.dtype ) A = self.mean + self.std * sample return x def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple=None ) -> int: if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean ,2 ) + self.var - 1.0 - self.logvar ,dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean ,2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar ,dim=[1, 2, 3] 
,) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[str] ,A_ : Union[str, Any]=[1, 2, 3] ) -> List[str]: if self.deterministic: return torch.Tensor([0.0] ) A = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean ,2 ) / self.var ,dim=A_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: return self.mean
22
0
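The VAE row's diagonal-Gaussian posterior boils down to the reparameterization trick: split the encoder output into mean and log-variance along the channel axis, then sample mean + std * eps. A toy sketch with made-up shapes:

import torch

params = torch.randn(1, 8, 4, 4)              # encoder output with 2 * latent channels
mean, logvar = torch.chunk(params, 2, dim=1)  # -> two (1, 4, 4, 4) tensors
logvar = torch.clamp(logvar, -30.0, 20.0)     # same numerical guard as the row above
std = torch.exp(0.5 * logvar)
sample = mean + std * torch.randn_like(mean)  # differentiable w.r.t. mean and logvar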
"""simple docstring""" import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse('''3.8'''): import importlib_metadata else: import importlib.metadata as importlib_metadata def _snake_case ( snake_case__ : List[str] , snake_case__ : List[Any]=False ): try: A = os.environ[key] except KeyError: # KEY isn't set, default to `default`. A = default else: # KEY is set, convert it to True or False. try: A = strtobool(snake_case__ ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F'If set, {key} must be yes or no.' ) return _value _lowercase = parse_flag_from_env('''RUN_SLOW''', default=False) _lowercase = parse_flag_from_env('''RUN_REMOTE''', default=False) _lowercase = parse_flag_from_env('''RUN_LOCAL''', default=True) _lowercase = parse_flag_from_env('''RUN_PACKAGED''', default=True) # Compression _lowercase = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''') _lowercase = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''') _lowercase = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''') # Audio _lowercase = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''), reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''', ) # Beam _lowercase = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''), reason='''test requires apache-beam and a compatible dill version''', ) # Dill-cloudpickle compatibility _lowercase = pytest.mark.skipif( config.DILL_VERSION <= version.parse('''0.3.2'''), reason='''test requires dill>0.3.2 for cloudpickle compatibility''', ) # Windows _lowercase = pytest.mark.skipif( sys.platform == '''win32''', reason='''test should not be run on Windows''', ) def _snake_case ( snake_case__ : int ): try: import faiss # noqa except ImportError: A = unittest.skip('test requires faiss' )(snake_case__ ) return test_case def _snake_case ( snake_case__ : Dict ): try: import regex # noqa except ImportError: A = unittest.skip('test requires regex' )(snake_case__ ) return test_case def _snake_case ( snake_case__ : List[str] ): try: import elasticsearch # noqa except ImportError: A = unittest.skip('test requires elasticsearch' )(snake_case__ ) return test_case def _snake_case ( snake_case__ : int ): try: import sqlalchemy # noqa except ImportError: A = unittest.skip('test requires sqlalchemy' )(snake_case__ ) return test_case def _snake_case ( snake_case__ : List[Any] ): if not config.TORCH_AVAILABLE: A = unittest.skip('test requires PyTorch' )(snake_case__ ) return test_case def _snake_case ( snake_case__ : Optional[Any] ): if not config.TF_AVAILABLE: A = unittest.skip('test requires TensorFlow' )(snake_case__ ) return test_case def _snake_case ( snake_case__ : Tuple ): if not config.JAX_AVAILABLE: A = unittest.skip('test requires JAX' )(snake_case__ ) return test_case def _snake_case ( snake_case__ : List[Any] ): if not config.PIL_AVAILABLE: A = unittest.skip('test 
requires Pillow' )(snake_case__ ) return test_case def _snake_case ( snake_case__ : Any ): try: import transformers # noqa F401 except ImportError: return unittest.skip('test requires transformers' )(snake_case__ ) else: return test_case def _snake_case ( snake_case__ : Any ): try: import tiktoken # noqa F401 except ImportError: return unittest.skip('test requires tiktoken' )(snake_case__ ) else: return test_case def _snake_case ( snake_case__ : Union[str, Any] ): try: import spacy # noqa F401 except ImportError: return unittest.skip('test requires spacy' )(snake_case__ ) else: return test_case def _snake_case ( snake_case__ : Tuple ): def _require_spacy_model(snake_case__ : Union[str, Any] ): try: import spacy # noqa F401 spacy.load(snake_case__ ) except ImportError: return unittest.skip('test requires spacy' )(snake_case__ ) except OSError: return unittest.skip('test requires spacy model \'{}\''.format(snake_case__ ) )(snake_case__ ) else: return test_case return _require_spacy_model def _snake_case ( snake_case__ : Optional[int] ): try: import pyspark # noqa F401 except ImportError: return unittest.skip('test requires pyspark' )(snake_case__ ) else: return test_case def _snake_case ( snake_case__ : str ): try: import joblibspark # noqa F401 except ImportError: return unittest.skip('test requires joblibspark' )(snake_case__ ) else: return test_case def _snake_case ( snake_case__ : Tuple ): if not _run_slow_tests or _run_slow_tests == 0: A = unittest.skip('test is slow' )(snake_case__ ) return test_case def _snake_case ( snake_case__ : Optional[int] ): if not _run_local_tests or _run_local_tests == 0: A = unittest.skip('test is local' )(snake_case__ ) return test_case def _snake_case ( snake_case__ : Union[str, Any] ): if not _run_packaged_tests or _run_packaged_tests == 0: A = unittest.skip('test is packaged' )(snake_case__ ) return test_case def _snake_case ( snake_case__ : str ): if not _run_remote_tests or _run_remote_tests == 0: A = unittest.skip('test requires remote' )(snake_case__ ) return test_case def _snake_case ( *snake_case__ : Union[str, Any] ): def decorate(cls : Tuple ): for name, fn in cls.__dict__.items(): if callable(snake_case__ ) and name.startswith('test' ): for decorator in decorators: A = decorator(snake_case__ ) setattr(cls , snake_case__ , snake_case__ ) return cls return decorate class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' pass class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = 0 _lowerCamelCase: Dict = 1 _lowerCamelCase: Any = 2 @contextmanager def _snake_case ( snake_case__ : Optional[Any]=OfflineSimulationMode.CONNECTION_FAILS , snake_case__ : Optional[Any]=1e-16 ): A = requests.Session().request def timeout_request(snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Dict , **snake_case__ : Tuple ): # Change the url to an invalid url so that the connection hangs A = 'https://10.255.255.1' if kwargs.get('timeout' ) is None: raise RequestWouldHangIndefinitelyError( F'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.' 
) A = timeout try: return online_request(snake_case__ , snake_case__ , **snake_case__ ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier A = url A = e.args[0] A = (max_retry_error.args[0].replace('10.255.255.1' , F'OfflineMock[{url}]' ),) A = (max_retry_error,) raise def raise_connection_error(snake_case__ : Optional[Any] , snake_case__ : Dict , **snake_case__ : int ): raise requests.ConnectionError('Offline mode is enabled.' , request=snake_case__ ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch('requests.Session.send' , snake_case__ ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch('requests.Session.request' , snake_case__ ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch('datasets.config.HF_DATASETS_OFFLINE' , snake_case__ ): yield else: raise ValueError('Please use a value from the OfflineSimulationMode enum.' ) @contextmanager def _snake_case ( *snake_case__ : str , **snake_case__ : Tuple ): A = str(Path().resolve() ) with tempfile.TemporaryDirectory(*snake_case__ , **snake_case__ ) as tmp_dir: try: os.chdir(snake_case__ ) yield finally: os.chdir(snake_case__ ) @contextmanager def _snake_case ( ): import gc gc.collect() A = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def _snake_case ( ): import gc gc.collect() A = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Any ): return deepcopy(snake_case__ ).integers(0 , 100 , 10 ).tolist() == deepcopy(snake_case__ ).integers(0 , 100 , 10 ).tolist() def _snake_case ( snake_case__ : Dict ): import decorator from requests.exceptions import HTTPError def _wrapper(snake_case__ : Optional[Any] , *snake_case__ : List[Any] , **snake_case__ : List[Any] ): try: return func(*snake_case__ , **snake_case__ ) except HTTPError as err: if str(snake_case__ ).startswith('500' ) or str(snake_case__ ).startswith('502' ): pytest.xfail(str(snake_case__ ) ) raise err return decorator.decorator(_wrapper , snake_case__ ) class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Tuple ,A_ : Union[str, Any] ,A_ : int ,A_ : List[Any] ) -> Any: A = returncode A = stdout A = stderr async def _snake_case ( snake_case__ : int , snake_case__ : List[str] ): while True: A = await stream.readline() if line: callback(snake_case__ ) else: break async def _snake_case ( snake_case__ : List[Any] , snake_case__ : Dict=None , snake_case__ : int=None , snake_case__ : List[str]=None , snake_case__ : List[Any]=False , snake_case__ : str=False ): if echo: print('\nRunning: ' , ' '.join(snake_case__ ) ) A = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=snake_case__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=snake_case__ , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) A = [] A = [] def tee(snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any]="" ): A = line.decode('utf-8' ).rstrip() sink.append(snake_case__ ) if not quiet: print(snake_case__ , snake_case__ , file=snake_case__ ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda snake_case__ : tee(snake_case__ , snake_case__ , sys.stdout , label='stdout:' ) ), _read_stream(p.stderr , lambda snake_case__ : tee(snake_case__ , snake_case__ , sys.stderr , label='stderr:' ) ), ] , timeout=snake_case__ , ) return _RunOutput(await p.wait() , snake_case__ , snake_case__ ) def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Dict=None , snake_case__ : Optional[int]=None , snake_case__ : Union[str, Any]=180 , snake_case__ : Dict=False , snake_case__ : List[str]=True ): A = asyncio.get_event_loop() A = loop.run_until_complete( _stream_subprocess(snake_case__ , env=snake_case__ , stdin=snake_case__ , timeout=snake_case__ , quiet=snake_case__ , echo=snake_case__ ) ) A = ' '.join(snake_case__ ) if result.returncode > 0: A = '\n'.join(result.stderr ) raise RuntimeError( F'\'{cmd_str}\' failed with returncode {result.returncode}\n\n' F'The combined stderr from workers follows:\n{stderr}' ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(F'\'{cmd_str}\' produced no output.' ) return result def _snake_case ( ): A = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' ) A = re.sub(r'^gw' , '' , snake_case__ , 0 , re.M ) return int(snake_case__ ) def _snake_case ( ): A = 2_9500 A = pytest_xdist_worker_id() return port + uniq_delta
712
"""simple docstring""" def _snake_case ( snake_case__ : list , snake_case__ : list , snake_case__ : int ): A = len(snake_case__ ) A = [[0] * n for i in range(snake_case__ )] for i in range(snake_case__ ): A = y_points[i] for i in range(2 , snake_case__ ): for j in range(snake_case__ , snake_case__ ): A = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
22
0
def prime_sieve_eratosthenes(num: int):
    if num <= 0:
        raise ValueError('Input must be a positive integer')
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input('Enter a positive integer: ').strip())
    print(prime_sieve_eratosthenes(user_num))
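
# Spot checks for the sieve above; it runs in O(n log log n) time and O(n) space.
assert prime_sieve_eratosthenes(10) == [2, 3, 5, 7]
assert prime_sieve_eratosthenes(2) == [2]
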
713
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] ,A_ : Optional[Any] ,A_ : Optional[int]=2 ,A_ : Any=True ,A_ : List[str]=False ,A_ : Tuple=10 ,A_ : List[Any]=3 ,A_ : Any=32 * 8 ,A_ : Dict=32 * 8 ,A_ : List[Any]=4 ,A_ : Tuple=64 ,) -> List[str]: A = parent A = batch_size A = is_training A = use_auxiliary_loss A = num_queries A = num_channels A = min_size A = max_size A = num_labels A = hidden_dim A = hidden_dim def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]: A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( A_ ) A = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=A_ ) A = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=A_ ) > 0.5 ).float() A = (torch.rand((self.batch_size, self.num_labels) ,device=A_ ) > 0.5).long() A = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]: A = MaskaFormerConfig( hidden_size=self.hidden_dim ,) A = self.num_queries A = self.num_labels A = [1, 1, 1, 1] A = self.num_channels A = 64 A = 128 A = self.hidden_dim A = self.hidden_dim A = self.hidden_dim return config def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A , A , A , A , A = self.prepare_config_and_inputs() A = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask} return config, inputs_dict def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]: A = output.encoder_hidden_states A = output.pixel_decoder_hidden_states A = output.transformer_decoder_hidden_states self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(A_ ) ,config.decoder_layers ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : List[str] ,A_ : Union[str, Any]=False ) -> str: with torch.no_grad(): A = MaskaFormerModel(config=A_ ) model.to(A_ ) model.eval() A = model(pixel_values=A_ ,pixel_mask=A_ ) A = model(A_ ,output_hidden_states=A_ ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : Any ,A_ : Dict ,A_ : Any ,A_ : Dict ) -> Optional[Any]: A = MaskaFormerForUniversalSegmentation(config=A_ ) model.to(A_ ) model.eval() def 
comm_check_on_output(A_ : str ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): A = model(pixel_values=A_ ,pixel_mask=A_ ) A = model(A_ ) comm_check_on_output(A_ ) A = model( pixel_values=A_ ,pixel_mask=A_ ,mask_labels=A_ ,class_labels=A_ ) comm_check_on_output(A_ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) ) @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Union[str, Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () _lowerCamelCase: Optional[Any] = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {} _lowerCamelCase: int = False _lowerCamelCase: Dict = False _lowerCamelCase: List[str] = False _lowerCamelCase: int = False def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict: A = MaskaFormerModelTester(self ) A = ConfigTester(self ,config_class=A_ ,has_text_modality=A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A , A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*A_ ) @unittest.skip(reason='Mask2Former does not use inputs_embeds' ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: pass @unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: pass @unittest.skip(reason='Mask2Former is not a generative model' ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: pass @unittest.skip(reason='Mask2Former does not use token embeddings' ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: pass @require_torch_multi_gpu @unittest.skip( reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: pass def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ) A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A = [*signature.parameters.keys()] A = ['pixel_values'] self.assertListEqual(arg_names[:1] ,A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: for model_name in ["facebook/mask2former-swin-small-coco-instance"]: A = MaskaFormerModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = (self.model_tester.min_size,) * 2 A = { 'pixel_values': torch.randn((2, 3, *size) ,device=A_ ), 'mask_labels': torch.randn((2, 10, *size) ,device=A_ ), 'class_labels': torch.zeros(2 ,10 ,device=A_ ).long(), } A = self.model_tester.get_config() A = MaskaFormerForUniversalSegmentation(A_ ).to(A_ ) A = model(**A_ ) self.assertTrue(outputs.loss is not None ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ).to(A_ ) A = model(**A_ ,output_attentions=A_ ) self.assertTrue(outputs.attentions is not None ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: if not self.model_tester.is_training: return A = self.all_model_classes[1] A , A , A , A , A = self.model_tester.prepare_config_and_inputs() A = model_class(A_ ) model.to(A_ ) model.train() A = model(A_ ,mask_labels=A_ ,class_labels=A_ ).loss loss.backward() def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = self.all_model_classes[1] A , A , A , A , A = self.model_tester.prepare_config_and_inputs() A = True A = True A = model_class(A_ ).to(A_ ) model.train() A = model(A_ ,mask_labels=A_ ,class_labels=A_ ) A = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() A = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() A = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() A = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=A_ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) _lowercase = 1e-4 def _snake_case ( ): A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_vision @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: return "facebook/mask2former-swin-small-coco-instance" @cached_property def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: A = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(A_ ) A = self.default_image_processor A = prepare_img() A = image_processor(A_ ,return_tensors='pt' ).to(A_ ) A = inputs['pixel_values'].shape # check size is divisible by 32 
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(A_ ,(1, 3, 384, 384) ) with torch.no_grad(): A = model(**A_ ) A = torch.tensor( [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(A_ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) ) A = torch.tensor( [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(A_ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) ) A = torch.tensor( [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(A_ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,A_ ,atol=A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval() A = self.default_image_processor A = prepare_img() A = image_processor(A_ ,return_tensors='pt' ).to(A_ ) A = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(A_ ,(1, 3, 384, 384) ) with torch.no_grad(): A = model(**A_ ) # masks_queries_logits A = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) A = [ [-8.78_39, -9.00_56, -8.81_21], [-7.41_04, -7.03_13, -6.54_01], [-6.61_05, -6.34_27, -6.46_75], ] A = torch.tensor(A_ ).to(A_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,A_ ,atol=A_ ) ) # class_queries_logits A = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) ) A = torch.tensor( [ [1.83_24, -8.08_35, -4.19_22], [0.84_50, -9.00_50, -3.60_53], [0.30_45, -7.72_93, -3.02_75], ] ).to(A_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,A_ ,atol=A_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval() A = self.default_image_processor A = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors='pt' ,) A = inputs['pixel_values'].to(A_ ) A = [el.to(A_ ) for el in inputs['mask_labels']] A = [el.to(A_ ) for el in inputs['class_labels']] with torch.no_grad(): A = model(**A_ ) self.assertTrue(outputs.loss is not None )
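
# The integration tests above all follow the same "golden slice" pattern: run the model,
# take a tiny slice of the output tensor, and compare it against hard-coded reference
# values within a tolerance. A minimal sketch of that pattern (the numbers here are made
# up stand-ins, not real model outputs):
import torch

TOLERANCE = 1e-4
expected_slice = torch.tensor([[-0.2790, -1.0717], [-0.5128, -0.3128]])
output_slice = expected_slice + 1e-5  # stand-in for model_output[0, 0, :2, :2]
assert torch.allclose(output_slice, expected_slice, atol=TOLERANCE)
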
22
0
from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] ,A_ : Optional[Any] ,A_ : int=2 ,A_ : int=3 ,A_ : int=4 ,A_ : Optional[int]=2 ,A_ : Dict=7 ,A_ : Any=True ,A_ : Dict=True ,A_ : Tuple=True ,A_ : List[Any]=True ,A_ : Dict=99 ,A_ : List[str]=36 ,A_ : str=2 ,A_ : Union[str, Any]=4 ,A_ : Any=37 ,A_ : int="gelu" ,A_ : Tuple=0.1 ,A_ : Tuple=0.1 ,A_ : Union[str, Any]=512 ,A_ : List[Any]=16 ,A_ : int=2 ,A_ : Union[str, Any]=0.02 ,A_ : Dict=6 ,A_ : str=6 ,A_ : List[str]=3 ,A_ : Optional[Any]=4 ,A_ : Optional[Any]=None ,A_ : Any=1000 ,) -> List[Any]: A = parent A = batch_size A = num_channels A = image_size A = patch_size A = is_training A = use_input_mask A = use_token_type_ids A = use_labels A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = type_sequence_label_size A = initializer_range A = coordinate_size A = shape_size A = num_labels A = num_choices A = scope A = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) A = text_seq_length A = (image_size // patch_size) ** 2 + 1 A = self.text_seq_length + self.image_seq_length def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: A = ids_tensor([self.batch_size, self.text_seq_length] ,self.vocab_size ) A = ids_tensor([self.batch_size, self.text_seq_length, 4] ,self.range_bbox ) A = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: A = bbox[i, j, 3] A = bbox[i, j, 1] A = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: A = bbox[i, j, 2] A = bbox[i, j, 0] A = tmp_coordinate A = tf.constant(A_ ) A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A = None if self.use_input_mask: A = random_attention_mask([self.batch_size, self.text_seq_length] ) A = None if self.use_token_type_ids: A = ids_tensor([self.batch_size, self.text_seq_length] ,self.type_vocab_size ) A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.text_seq_length] ,self.num_labels ) A = LayoutLMvaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads 
,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,coordinate_size=self.coordinate_size ,shape_size=self.shape_size ,input_size=self.image_size ,patch_size=self.patch_size ,) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Optional[Any] ,A_ : Tuple ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : str ,A_ : Optional[int] ) -> Dict: A = TFLayoutLMvaModel(config=A_ ) # text + image A = model(A_ ,pixel_values=A_ ,training=A_ ) A = model( A_ ,bbox=A_ ,pixel_values=A_ ,attention_mask=A_ ,token_type_ids=A_ ,training=A_ ,) A = model(A_ ,bbox=A_ ,pixel_values=A_ ,training=A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) # text only A = model(A_ ,training=A_ ) self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.text_seq_length, self.hidden_size) ) # image only A = model({'pixel_values': pixel_values} ,training=A_ ) self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.image_seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Tuple ,A_ : int ,A_ : str ,A_ : List[str] ,A_ : Any ,A_ : List[str] ,A_ : Optional[int] ) -> Dict: A = self.num_labels A = TFLayoutLMvaForSequenceClassification(config=A_ ) A = model( A_ ,bbox=A_ ,pixel_values=A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ ,training=A_ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Any ,A_ : Any ,A_ : List[Any] ,A_ : List[str] ,A_ : Tuple ) -> Tuple: A = self.num_labels A = TFLayoutLMvaForTokenClassification(config=A_ ) A = model( A_ ,bbox=A_ ,pixel_values=A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ ,training=A_ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.text_seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : int ,A_ : Any ,A_ : int ,A_ : Union[str, Any] ,A_ : Any ) -> str: A = 2 A = TFLayoutLMvaForQuestionAnswering(config=A_ ) A = model( A_ ,bbox=A_ ,pixel_values=A_ ,attention_mask=A_ ,token_type_ids=A_ ,start_positions=A_ ,end_positions=A_ ,training=A_ ,) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: A = self.prepare_config_and_inputs() ((A) , (A) , (A) , (A) , (A) , (A) , (A) , (A)) = config_and_inputs A = { 'input_ids': input_ids, 'bbox': bbox, 'pixel_values': pixel_values, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_tf class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Tuple = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) _lowerCamelCase: Optional[int] = ( {'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel} if is_tf_available() else {} ) 
_lowerCamelCase: List[str] = False _lowerCamelCase: str = False _lowerCamelCase: int = False def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Any ,A_ : Optional[Any] ,A_ : List[Any] ,A_ : str ) -> Dict: return True def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : str ,A_ : List[str] ,A_ : Optional[int]=False ) -> dict: A = copy.deepcopy(A_ ) if model_class in get_values(A_ ): A = { k: tf.tile(tf.expand_dims(A_ ,1 ) ,(1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(A_ ,tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(A_ ): A = tf.ones(self.model_tester.batch_size ,dtype=tf.intaa ) elif model_class in get_values(A_ ): A = tf.zeros(self.model_tester.batch_size ,dtype=tf.intaa ) A = tf.zeros(self.model_tester.batch_size ,dtype=tf.intaa ) elif model_class in get_values(A_ ): A = tf.zeros(self.model_tester.batch_size ,dtype=tf.intaa ) elif model_class in get_values(A_ ): A = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) ,dtype=tf.intaa ) return inputs_dict def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = TFLayoutLMvaModelTester(self ) A = ConfigTester(self ,config_class=A_ ,hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ) if getattr(A_ ,'hf_compute_loss' ,A_ ): # The number of elements in the loss should be the same as the number of elements in the label A = self._prepare_for_class(inputs_dict.copy() ,A_ ,return_labels=A_ ) A = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() ,reverse=A_ )[0] ] A = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs A = self._prepare_for_class(inputs_dict.copy() ,A_ ,return_labels=A_ ) A = prepared_for_class.pop('input_ids' ) A = model(A_ ,**A_ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions A = self._prepare_for_class(inputs_dict.copy() ,A_ ,return_labels=A_ ) A = prepared_for_class.pop('input_ids' ) if "labels" in prepared_for_class: A = prepared_for_class['labels'].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: A = -100 A = tf.convert_to_tensor(A_ ) A = model(A_ ,**A_ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict A = self._prepare_for_class(inputs_dict.copy() ,A_ ,return_labels=A_ ) A = model(A_ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple A = self._prepare_for_class(inputs_dict.copy() ,A_ ,return_labels=A_ ) # Get keys that were added with the _prepare_for_class function A = prepared_for_class.keys() - inputs_dict.keys() A = inspect.signature(model.call ).parameters A = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple A = {0: 'input_ids'} for label_key in label_keys: A = signature_names.index(A_ ) A = label_key A = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple A = 
[] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: A = prepared_for_class[value] A = tuple(A_ ) # Send to model A = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(A_ ,A_ ,A_ ,A_ ,A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A = type self.model_tester.create_and_check_model(A_ ,A_ ,A_ ,A_ ,A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( A_ ,A_ ,A_ ,A_ ,A_ ,A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( A_ ,A_ ,A_ ,A_ ,A_ ,A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( A_ ,A_ ,A_ ,A_ ,A_ ,A_ ,A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = TFLayoutLMvaModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def _snake_case ( ): A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: return LayoutLMvaImageProcessor(apply_ocr=A_ ) if is_vision_available() else None @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: A = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ) A = self.default_image_processor A = prepare_img() A = image_processor(images=A_ ,return_tensors='tf' ).pixel_values A = tf.constant([[1, 2]] ) A = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) ,axis=0 ) # forward pass A = model(input_ids=A_ ,bbox=A_ ,pixel_values=A_ ,training=A_ ) # verify the logits A = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape ,A_ ) A = tf.constant( [[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] ,A_ ,atol=1e-4 ) )
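
# The tester above repairs illegal bounding boxes with a nested Python loop, swapping
# coordinates whenever x1 < x0 or y1 < y0. The same repair can be done in one vectorized
# pass; a sketch under the same (x0, y0, x1, y1) layout assumption, with random data:
import numpy as np

rng = np.random.default_rng(0)
bbox = rng.integers(0, 100, size=(2, 5, 4))
x_lo = np.minimum(bbox[..., 0], bbox[..., 2])
x_hi = np.maximum(bbox[..., 0], bbox[..., 2])
y_lo = np.minimum(bbox[..., 1], bbox[..., 3])
y_hi = np.maximum(bbox[..., 1], bbox[..., 3])
bbox = np.stack([x_lo, y_lo, x_hi, y_hi], axis=-1)
assert (bbox[..., 2] >= bbox[..., 0]).all() and (bbox[..., 3] >= bbox[..., 1]).all()
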
714
"""simple docstring""" from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] ,A_ : Optional[Any] ,) -> Optional[int]: A = parent A = 13 A = 7 A = True A = True A = True A = 99 A = 32 A = 2 A = 4 A = 37 A = 'gelu' A = 0.1 A = 0.1 A = 512 A = 16 A = 2 A = 0.02 A = 3 A = 4 A = None def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = None if self.use_input_mask: A = random_attention_mask([self.batch_size, self.seq_length] ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A = ids_tensor([self.batch_size] ,self.num_choices ) A = EsmConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,pad_token_id=1 ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = self.prepare_config_and_inputs() A = True A = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : List[Any] ,A_ : Any ,A_ : Any ) -> Dict: A = TFEsmModel(config=A_ ) A = {'input_ids': input_ids, 'attention_mask': input_mask} A = model(A_ ) A = [input_ids, input_mask] A = model(A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : List[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,) -> Optional[int]: A = True A = TFEsmModel(config=A_ ) A = { 'input_ids': input_ids, 'attention_mask': input_mask, 'encoder_hidden_states': encoder_hidden_states, 'encoder_attention_mask': encoder_attention_mask, } A = model(A_ ) A = [input_ids, input_mask] A = model(A_ ,encoder_hidden_states=A_ ) # Also check the case where encoder outputs are not passed A = model(A_ ,attention_mask=A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[Any] 
,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[Any] ) -> Dict: A = TFEsmForMaskedLM(config=A_ ) A = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[Any] ,A_ : int ,A_ : Tuple ,A_ : Optional[int] ) -> Union[str, Any]: A = self.num_labels A = TFEsmForTokenClassification(config=A_ ) A = {'input_ids': input_ids, 'attention_mask': input_mask} A = model(A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) _lowerCamelCase: List[str] = ( { '''feature-extraction''': TFEsmModel, '''fill-mask''': TFEsmForMaskedLM, '''text-classification''': TFEsmForSequenceClassification, '''token-classification''': TFEsmForTokenClassification, '''zero-shot''': TFEsmForSequenceClassification, } if is_tf_available() else {} ) _lowerCamelCase: Union[str, Any] = False _lowerCamelCase: List[Any] = False def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: A = TFEsmModelTester(self ) A = ConfigTester(self ,config_class=A_ ,hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = TFEsmModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) @unittest.skip('Protein models do not support embedding resizing.' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: pass @unittest.skip('Protein models do not support embedding resizing.' 
) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any: pass def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ) assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer A = model.get_bias() assert isinstance(A_ ,A_ ) for k, v in name.items(): assert isinstance(A_ ,tf.Variable ) else: A = model.get_output_embeddings() assert x is None A = model.get_bias() assert name is None @require_tf class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) A = tf.constant([[0, 1, 2, 3, 4, 5]] ) A = model(A_ )[0] A = [1, 6, 33] self.assertEqual(list(output.numpy().shape ) ,A_ ) # compare the actual values for a slice. A = tf.constant( [ [ [8.92_15_18, -10.58_98_14, -6.4_67_13_07], [-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15], [-7.78_12_47, -13.95_15_57, -3.74_05_92], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-2 ) ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: A = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) A = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) A = model(A_ )[0] # compare the actual values for a slice. A = tf.constant( [ [ [0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39], [0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22], [0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
22
0
"""simple docstring""" import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: str = XLMProphetNetTokenizer _lowerCamelCase: Union[str, Any] = False _lowerCamelCase: List[str] = True def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: super().setUp() # We have a SentencePiece fixture for testing A = XLMProphetNetTokenizer(A_ ,keep_accents=A_ ) tokenizer.save_pretrained(self.tmpdirname ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: A = '[PAD]' A = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) ,A_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: A = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,'[PAD]' ) self.assertEqual(vocab_keys[1] ,'[CLS]' ) self.assertEqual(vocab_keys[-1] ,'j' ) self.assertEqual(len(A_ ) ,1012 ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]: self.assertEqual(self.get_tokenizer().vocab_size ,1012 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: A = XLMProphetNetTokenizer(A_ ,keep_accents=A_ ) A = tokenizer.tokenize('This is a test' ) self.assertListEqual(A_ ,['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(A_ ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,) A = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( A_ ,[ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] ,) A = tokenizer.convert_tokens_to_ids(A_ ) self.assertListEqual( A_ ,[ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] ,) A = tokenizer.convert_ids_to_tokens(A_ ) self.assertListEqual( A_ ,[ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '[UNK]', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '[UNK]', '.', ] ,) @cached_property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int: A = 'Hello World!' 
A = [3_5389, 6672, 49, 2] self.assertListEqual(A_ ,self.big_tokenizer.encode(A_ ) ) @slow def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: # fmt: off A = {'input_ids': [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=A_ ,model_name='microsoft/xprophetnet-large-wiki100-cased' ,revision='1acad1643ddd54a44df6a1b797ada8373685d90e' ,)
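
# One convention worth calling out in the assertions above: raw SentencePiece piece ids
# are shifted by a fixed `fairseq_offset` so that the first vocabulary slots stay reserved
# for special tokens ([PAD], [CLS], ...). In isolation (the offset value here is
# hypothetical, chosen only for illustration):
fairseq_offset = 12
sp_ids = [285, 46, 10, 170, 382]
vocab_ids = [i + fairseq_offset for i in sp_ids]
assert vocab_ids[0] == 285 + fairseq_offset
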
715
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys _lowercase = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''') _lowercase = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('''utf-8''').split() _lowercase = '''|'''.join(sys.argv[1:]) _lowercase = re.compile(rF"""^({joined_dirs}).*?\.py$""") _lowercase = [x for x in modified_files if regex.match(x)] print(''' '''.join(relevant_modified_files), end='''''')
22
0
"""simple docstring""" import requests def _snake_case ( snake_case__ : str , snake_case__ : str ): A = {'Content-Type': 'application/json'} A = requests.post(snake_case__ , json={'text': message_body} , headers=snake_case__ ) if response.status_code != 200: A = ( 'Request to slack returned an error ' F'{response.status_code}, the response is:\n{response.text}' ) raise ValueError(snake_case__ ) if __name__ == "__main__": # Set the slack url to the one provided by Slack when you create the webhook at # https://my.slack.com/services/new/incoming-webhook/ send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
716
"""simple docstring""" import sys from collections import defaultdict class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] ) -> int: A = [] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : int ) -> Optional[int]: return self.node_position[vertex] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[Any] ,A_ : Any ) -> List[Any]: A = pos def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : str ,A_ : Dict ,A_ : List[str] ) -> str: if start > size // 2 - 1: return else: if 2 * start + 2 >= size: A = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: A = 2 * start + 1 else: A = 2 * start + 2 if heap[smallest_child] < heap[start]: A , A = heap[smallest_child], positions[smallest_child] A , A = ( heap[start], positions[start], ) A , A = temp, tempa A = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] ,self.get_position(positions[start] ) ) self.set_position(positions[start] ,A_ ) self.top_to_bottom(A_ ,A_ ,A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Optional[int] ,A_ : Dict ,A_ : str ,A_ : Union[str, Any] ) -> Dict: A = position[index] while index != 0: A = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: A = heap[parent] A = position[parent] self.set_position(position[parent] ,A_ ) else: A = val A = temp self.set_position(A_ ,A_ ) break A = parent else: A = val A = temp self.set_position(A_ ,0 ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple ,A_ : Dict ) -> Union[str, Any]: A = len(A_ ) // 2 - 1 for i in range(A_ ,-1 ,-1 ): self.top_to_bottom(A_ ,A_ ,len(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ,A_ : Dict ) -> Union[str, Any]: A = positions[0] A = sys.maxsize self.top_to_bottom(A_ ,0 ,len(A_ ) ,A_ ) return temp def _snake_case ( snake_case__ : Dict ): A = Heap() A = [0] * len(snake_case__ ) A = [-1] * len(snake_case__ ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph A = [] # Heap of Distance of vertices from their neighboring vertex A = [] for vertex in range(len(snake_case__ ) ): distance_tv.append(sys.maxsize ) positions.append(snake_case__ ) heap.node_position.append(snake_case__ ) A = [] A = 1 A = sys.maxsize for neighbor, distance in adjacency_list[0]: A = 0 A = distance heap.heapify(snake_case__ , snake_case__ ) for _ in range(1 , len(snake_case__ ) ): A = heap.delete_minimum(snake_case__ , snake_case__ ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) A = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(snake_case__ )] ): A = distance heap.bottom_to_top( snake_case__ , heap.get_position(snake_case__ ) , snake_case__ , snake_case__ ) A = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > _lowercase = int(input('''Enter number of edges: ''').strip()) _lowercase = defaultdict(list) for _ in range(edges_number): _lowercase = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
22
0
"""simple docstring""" import numpy as np from PIL import Image def _snake_case ( snake_case__ : np.ndarray , snake_case__ : int , snake_case__ : int ): A = np.array(snake_case__ ) if arr.shape[0] != arr.shape[1]: raise ValueError('The input array is not a square matrix' ) A = 0 A = 0 A = 0 A = 0 # compute the shape of the output matrix A = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape maxpool_shape A = np.zeros((maxpool_shape, maxpool_shape) ) while i < arr.shape[0]: if i + size > arr.shape[0]: # if the end of the matrix is reached, break break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the maximum of the pooling matrix A = np.max(arr[i : i + size, j : j + size] ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 A = 0 A = 0 return updated_arr def _snake_case ( snake_case__ : np.ndarray , snake_case__ : int , snake_case__ : int ): A = np.array(snake_case__ ) if arr.shape[0] != arr.shape[1]: raise ValueError('The input array is not a square matrix' ) A = 0 A = 0 A = 0 A = 0 # compute the shape of the output matrix A = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape avgpool_shape A = np.zeros((avgpool_shape, avgpool_shape) ) while i < arr.shape[0]: # if the end of the matrix is reached, break if i + size > arr.shape[0]: break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the average of the pooling matrix A = int(np.average(arr[i : i + size, j : j + size] ) ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 A = 0 A = 0 return updated_arr # Main Function if __name__ == "__main__": from doctest import testmod testmod(name='''avgpooling''', verbose=True) # Loading the image _lowercase = Image.open('''path_to_image''') # Converting the image to numpy array and maxpooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() # Converting the image to numpy array and averagepooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
717
"""simple docstring""" import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter _lowercase = True except ImportError: _lowercase = False _lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name def _snake_case ( snake_case__ : Namespace ): return AddNewModelCommand(args.testing , args.testing_file , path=args.path ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any: A = parser.add_parser('add-new-model' ) add_new_model_parser.add_argument('--testing' ,action='store_true' ,help='If in testing mode.' ) add_new_model_parser.add_argument('--testing_file' ,type=A_ ,help='Configuration file on which to run.' ) add_new_model_parser.add_argument( '--path' ,type=A_ ,help='Path to cookiecutter. Should only be used for testing purposes.' ) add_new_model_parser.set_defaults(func=A_ ) def __init__( self : Tuple ,A_ : bool ,A_ : str ,A_ : Tuple=None ,*A_ : List[str] ) -> Union[str, Any]: A = testing A = testing_file A = path def _SCREAMING_SNAKE_CASE ( self : int ) -> int: warnings.warn( 'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. ' 'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality ' 'checks, you should use `transformers-cli add-new-model-like` instead.' ) if not _has_cookiecutter: raise ImportError( 'Model creation dependencies are required to use the `add_new_model` command. Install them by running ' 'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' ) # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory A = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]] if len(A_ ) > 0: raise ValueError( 'Several directories starting with `cookiecutter-template-` in current working directory. ' 'Please clean your directory by removing all folders starting with `cookiecutter-template-` or ' 'change your working directory.' 
) A = ( Path(A_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent ) A = path_to_transformer_root / 'templates' / 'adding_a_new_model' # Execute cookiecutter if not self._testing: cookiecutter(str(A_ ) ) else: with open(self._testing_file ,'r' ) as configuration_file: A = json.load(A_ ) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=A_ ,extra_context=A_ ,) A = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0] # Retrieve configuration with open(directory + '/configuration.json' ,'r' ) as configuration_file: A = json.load(A_ ) A = configuration['lowercase_modelname'] A = configuration['generate_tensorflow_pytorch_and_flax'] os.remove(F'{directory}/configuration.json' ) A = 'PyTorch' in generate_tensorflow_pytorch_and_flax A = 'TensorFlow' in generate_tensorflow_pytorch_and_flax A = 'Flax' in generate_tensorflow_pytorch_and_flax A = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}' os.makedirs(A_ ,exist_ok=A_ ) os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=A_ ) # Tests require submodules as they have parent imports with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,'w' ): pass shutil.move( F'{directory}/__init__.py' ,F'{model_dir}/__init__.py' ,) shutil.move( F'{directory}/configuration_{lowercase_model_name}.py' ,F'{model_dir}/configuration_{lowercase_model_name}.py' ,) def remove_copy_lines(A_ : int ): with open(A_ ,'r' ) as f: A = f.readlines() with open(A_ ,'w' ) as f: for line in lines: if "# Copied from transformers." not in line: f.write(A_ ) if output_pytorch: if not self._testing: remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' ) shutil.move( F'{directory}/modeling_{lowercase_model_name}.py' ,F'{model_dir}/modeling_{lowercase_model_name}.py' ,) shutil.move( F'{directory}/test_modeling_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,) else: os.remove(F'{directory}/modeling_{lowercase_model_name}.py' ) os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' ) if output_tensorflow: if not self._testing: remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' ) shutil.move( F'{directory}/modeling_tf_{lowercase_model_name}.py' ,F'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,) shutil.move( F'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,) else: os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' ) os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' ) if output_flax: if not self._testing: remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' ) shutil.move( F'{directory}/modeling_flax_{lowercase_model_name}.py' ,F'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,) shutil.move( F'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,) else: os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' ) os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' ) shutil.move( F'{directory}/{lowercase_model_name}.md' ,F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,) shutil.move( 
F'{directory}/tokenization_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}.py' ,) shutil.move( F'{directory}/tokenization_fast_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(A_ : str ,A_ : str ,A_ : List[str] ): # Create temp file A , A = mkstemp() A = False with fdopen(A_ ,'w' ) as new_file: with open(A_ ) as old_file: for line in old_file: new_file.write(A_ ) if line_to_copy_below in line: A = True for line_to_copy in lines_to_copy: new_file.write(A_ ) if not line_found: raise ValueError(F'Line {line_to_copy_below} was not found in file.' ) # Copy the file permissions from the old file to the new file copymode(A_ ,A_ ) # Remove original file remove(A_ ) # Move new file move(A_ ,A_ ) def skip_units(A_ : Dict ): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(A_ : Tuple ): with open(A_ ) as datafile: A = [] A = False A = False for line in datafile: if "# To replace in: " in line and "##" not in line: A = line.split('"' )[1] A = skip_units(A_ ) elif "# Below: " in line and "##" not in line: A = line.split('"' )[1] A = skip_units(A_ ) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(A_ ,A_ ,A_ ) A = [] elif "# Replace with" in line and "##" not in line: A = [] elif "##" not in line: lines_to_copy.append(A_ ) remove(A_ ) replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' ) os.rmdir(A_ )
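
# The replace() helper above implements a common safe-edit pattern: stream the original
# file into a temp file, splice in new lines below a marker, then move the temp file over
# the original while preserving its permissions. The same pattern in miniature (the
# function name and arguments here are illustrative, not from the original file):
import os
import shutil
import tempfile


def insert_below_marker(path, marker, new_lines):
    fd, tmp_path = tempfile.mkstemp()
    with os.fdopen(fd, 'w') as out, open(path) as src:
        for line in src:
            out.write(line)
            if marker in line:
                out.writelines(new_lines)
    shutil.copymode(path, tmp_path)  # keep the original file's permissions
    os.remove(path)
    shutil.move(tmp_path, path)
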
22
0
"""simple docstring""" import argparse from collections import defaultdict import yaml _lowercase = '''docs/source/en/_toctree.yml''' def _snake_case ( snake_case__ : List[Any] ): A = defaultdict(snake_case__ ) for doc in model_doc: counts[doc["local"]] += 1 A = [key for key, value in counts.items() if value > 1] A = [] for duplicate_key in duplicates: A = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} ) if len(snake_case__ ) > 1: raise ValueError( F'{duplicate_key} is present several times in the documentation table of content at ' '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ' 'others.' ) # Only add this once new_doc.append({'local': duplicate_key, 'title': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] ) # Sort return sorted(snake_case__ , key=lambda snake_case__ : s["title"].lower() ) def _snake_case ( snake_case__ : List[str]=False ): with open(snake_case__ , encoding='utf-8' ) as f: A = yaml.safe_load(f.read() ) # Get to the API doc A = 0 while content[api_idx]["title"] != "API": api_idx += 1 A = content[api_idx]['sections'] # Then to the model doc A = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 A = api_doc[model_idx]['sections'] A = [(idx, section) for idx, section in enumerate(snake_case__ ) if 'sections' in section] A = False for idx, modality_doc in modalities_docs: A = modality_doc['sections'] A = clean_model_doc_toc(snake_case__ ) if old_modality_doc != new_modality_doc: A = True if overwrite: A = new_modality_doc if diff: if overwrite: A = model_doc A = api_doc with open(snake_case__ , 'w' , encoding='utf-8' ) as f: f.write(yaml.dump(snake_case__ , allow_unicode=snake_case__ ) ) else: raise ValueError( 'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') _lowercase = parser.parse_args() check_model_doc(args.fix_and_overwrite)
718
"""simple docstring""" import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : int ,A_ : Tuple ,A_ : str=7 ,A_ : Tuple=3 ,A_ : List[Any]=18 ,A_ : List[str]=30 ,A_ : Optional[Any]=400 ,A_ : Any=True ,A_ : Optional[Any]=None ,A_ : List[str]=True ,) -> str: A = size if size is not None else {'height': 18, 'width': 18} A = parent A = batch_size A = num_channels A = image_size A = min_resolution A = max_resolution A = do_resize A = size A = do_normalize def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04], [-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[Any] = ImageGPTImageProcessor if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = ImageGPTImageProcessingTester(self ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ ,'clusters' ) ) self.assertTrue(hasattr(A_ ,'do_resize' ) ) self.assertTrue(hasattr(A_ ,'size' ) ) self.assertTrue(hasattr(A_ ,'do_normalize' ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} ) A = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ) self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: A = self.image_processing_class(**self.image_processor_dict ) A = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(A_ ,obj[key] ) ) else: self.assertEqual(obj[key] ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: A = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A = os.path.join(A_ ,'image_processor.json' ) image_processor_first.to_json_file(A_ ) A = self.image_processing_class.from_json_file(A_ ).to_dict() A = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: A = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(A_ ) A = 
self.image_processing_class.from_pretrained(A_ ).to_dict() A = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] ,A_ ) @unittest.skip('ImageGPT requires clusters at initialization' ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: pass def _snake_case ( ): A = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' ) A = Image.open(dataset[4]['file'] ) A = Image.open(dataset[5]['file'] ) A = [imagea, imagea] return images @require_vision @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : str ) -> int: A = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' ) A = prepare_images() # test non-batched A = image_processing(images[0] ,return_tensors='pt' ) self.assertIsInstance(encoding.input_ids ,torch.LongTensor ) self.assertEqual(encoding.input_ids.shape ,(1, 1024) ) A = [306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist() ,A_ ) # test batched A = image_processing(A_ ,return_tensors='pt' ) self.assertIsInstance(encoding.input_ids ,torch.LongTensor ) self.assertEqual(encoding.input_ids.shape ,(2, 1024) ) A = [303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() ,A_ )
22
0
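The ImageGPT test row above repeatedly special-cases the `clusters` entry when comparing serialized configs, because it is a numpy array and plain `==` on arrays does not reduce to a single bool. A self-contained sketch of that comparison idiom, with illustrative config values of my own:

import json

import numpy as np


def configs_equal(first: dict, second: dict) -> bool:
    # Compare two config dicts, using np.array_equal for array-valued entries
    if first.keys() != second.keys():
        return False
    for key, value in first.items():
        if isinstance(value, np.ndarray):
            if not np.array_equal(value, second[key]):
                return False
        elif value != second[key]:
            return False
    return True


config = {'clusters': np.eye(2), 'do_resize': True, 'size': {'height': 18, 'width': 18}}
# A JSON round-trip turns the array into nested lists, so rebuild it before comparing
serialized = json.dumps({k: v.tolist() if isinstance(v, np.ndarray) else v for k, v in config.items()})
restored = json.loads(serialized)
restored['clusters'] = np.asarray(restored['clusters'])
assert configs_equal(config, restored)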
"""simple docstring""" import random def _snake_case ( snake_case__ : List[Any] , snake_case__ : str , snake_case__ : List[str] ): A = a[left_index] A = left_index + 1 for j in range(left_index + 1 , snake_case__ ): if a[j] < pivot: A , A = a[i], a[j] i += 1 A , A = a[i - 1], a[left_index] return i - 1 def _snake_case ( snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : str ): if left < right: A = random.randint(snake_case__ , right - 1 ) A , A = ( a[left], a[pivot], ) # switches the pivot with the left most bound A = partition(snake_case__ , snake_case__ , snake_case__ ) quick_sort_random( snake_case__ , snake_case__ , snake_case__ ) # recursive quicksort to the left of the pivot point quick_sort_random( snake_case__ , pivot_index + 1 , snake_case__ ) # recursive quicksort to the right of the pivot point def _snake_case ( ): A = input('Enter numbers separated by a comma:\n' ).strip() A = [int(snake_case__ ) for item in user_input.split(',' )] quick_sort_random(snake_case__ , 0 , len(snake_case__ ) ) print(snake_case__ ) if __name__ == "__main__": main()
719
"""simple docstring""" from argparse import ArgumentParser from . import BaseTransformersCLICommand def _snake_case ( snake_case__ : Optional[int] ): return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any: A = parser.add_parser('download' ) download_parser.add_argument( '--cache-dir' ,type=A_ ,default=A_ ,help='Path to location to store the models' ) download_parser.add_argument( '--force' ,action='store_true' ,help='Force the model to be download even if already in cache-dir' ) download_parser.add_argument( '--trust-remote-code' ,action='store_true' ,help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' ,) download_parser.add_argument('model' ,type=A_ ,help='Name of the model to download' ) download_parser.set_defaults(func=A_ ) def __init__( self : Dict ,A_ : str ,A_ : str ,A_ : bool ,A_ : bool ) -> Union[str, Any]: A = model A = cache A = force A = trust_remote_code def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: from ..models.auto import AutoModel, AutoTokenizer AutoModel.from_pretrained( self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code ) AutoTokenizer.from_pretrained( self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
22
0
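For reference, the randomized quicksort shown earlier in this row group can be exercised without its interactive prompt; this usage example is mine and assumes the function names from that snippet:

import random

random.seed(0)  # make the pivot choices reproducible for the demo
data = [5, 1, 4, 1, 5, 9, 2, 6]
quick_sort_random(data, 0, len(data))
assert data == [1, 1, 2, 4, 5, 5, 6, 9]
print(data)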
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : str ) -> Any: A = tempfile.mkdtemp() A = BlipImageProcessor() A = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel' ) A = BlipProcessor(A_ ,A_ ) processor.save_pretrained(self.tmpdirname ) def _SCREAMING_SNAKE_CASE ( self : int ,**A_ : Dict ) -> Dict: return AutoProcessor.from_pretrained(self.tmpdirname ,**A_ ).tokenizer def _SCREAMING_SNAKE_CASE ( self : List[Any] ,**A_ : Tuple ) -> Tuple: return AutoProcessor.from_pretrained(self.tmpdirname ,**A_ ).image_processor def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: shutil.rmtree(self.tmpdirname ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: A = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] A = [Image.fromarray(np.moveaxis(A_ ,0 ,-1 ) ) for x in image_inputs] return image_inputs def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: A = BlipProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' ) A = self.get_image_processor(do_normalize=A_ ,padding_value=1.0 ) A = BlipProcessor.from_pretrained( self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=A_ ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,A_ ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: A = self.get_image_processor() A = self.get_tokenizer() A = BlipProcessor(tokenizer=A_ ,image_processor=A_ ) A = self.prepare_image_inputs() A = image_processor(A_ ,return_tensors='np' ) A = processor(images=A_ ,return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: A = self.get_image_processor() A = self.get_tokenizer() A = BlipProcessor(tokenizer=A_ ,image_processor=A_ ) A = 'lower newer' A = processor(text=A_ ) A = tokenizer(A_ ,return_token_type_ids=A_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: A = self.get_image_processor() A = self.get_tokenizer() A = BlipProcessor(tokenizer=A_ ,image_processor=A_ ) A = 'lower newer' A = self.prepare_image_inputs() A = processor(text=A_ ,images=A_ ) self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'input_ids', 'attention_mask'] ) # test if it raises when no input is passed with pytest.raises(A_ ): processor() def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any: A = self.get_image_processor() A = self.get_tokenizer() A = BlipProcessor(tokenizer=A_ ,image_processor=A_ ) A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A = processor.batch_decode(A_ ) A = tokenizer.batch_decode(A_ ) self.assertListEqual(A_ ,A_ ) 
def _SCREAMING_SNAKE_CASE ( self : int ) -> Any: A = self.get_image_processor() A = self.get_tokenizer() A = BlipProcessor(tokenizer=A_ ,image_processor=A_ ) A = 'lower newer' A = self.prepare_image_inputs() A = processor(text=A_ ,images=A_ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'input_ids', 'attention_mask'] )
720
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = {'''vocab_file''': '''spm_char.model'''} _lowercase = { '''vocab_file''': { '''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''', '''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''', '''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''', } } _lowercase = { '''microsoft/speecht5_asr''': 10_24, '''microsoft/speecht5_tts''': 10_24, '''microsoft/speecht5_vc''': 10_24, } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = VOCAB_FILES_NAMES _lowerCamelCase: List[Any] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase: Tuple = ['''input_ids''', '''attention_mask'''] def __init__( self : List[str] ,A_ : int ,A_ : List[str]="<s>" ,A_ : Optional[Any]="</s>" ,A_ : Optional[Any]="<unk>" ,A_ : str="<pad>" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : List[str] ,) -> None: A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,pad_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,) A = vocab_file A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A_ ) @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: return self.sp_model.get_piece_size() def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : str ) -> Any: A = self.__dict__.copy() A = None return state def __setstate__( self : Optional[int] ,A_ : str ) -> Tuple: A = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): A = {} A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[str]: return self.sp_model.encode(A_ ,out_type=A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any] ) -> Union[str, Any]: return self.sp_model.piece_to_id(A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict ) -> List[Any]: A = self.sp_model.IdToPiece(A_ ) return token def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Optional[Any] ) -> List[str]: A = [] A = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(A_ ) + token A = [] else: current_sub_tokens.append(A_ ) out_string += self.sp_model.decode(A_ ) return out_string.strip() def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : Optional[int]=None ) -> List[int]: if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ ) A = [1] if token_ids_a is None: return ([0] * 
len(A_ )) + suffix_ones return ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(A_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A = os.path.join( A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ ,'wb' ) as fi: A = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,)
22
0
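The SpeechT5 tokenizer row above appends a single EOS id to every sequence and flags only that trailing position in the special-tokens mask. A minimal sketch of those two methods in isolation; the token id is illustrative, not SpeechT5's real vocabulary:

EOS_TOKEN_ID = 2  # illustrative id


def build_inputs_with_special_tokens(token_ids_a, token_ids_b=None):
    # Append EOS; pairs are simply concatenated for API consistency
    if token_ids_b is None:
        return token_ids_a + [EOS_TOKEN_ID]
    return token_ids_a + token_ids_b + [EOS_TOKEN_ID]


def get_special_tokens_mask(token_ids_a, token_ids_b=None):
    # 1 marks a special token; only the trailing EOS is special here
    suffix_ones = [1]
    if token_ids_b is None:
        return [0] * len(token_ids_a) + suffix_ones
    return [0] * len(token_ids_a) + [0] * len(token_ids_b) + suffix_ones


assert build_inputs_with_special_tokens([5, 6, 7]) == [5, 6, 7, 2]
assert get_special_tokens_mask([5, 6, 7]) == [0, 0, 0, 1]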
"""simple docstring""" import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class __lowercase : '''simple docstring''' def __init__( self : Tuple ,A_ : Optional[int] ,A_ : Tuple=99 ,A_ : Any=13 ,A_ : List[str]=7 ,A_ : Any=9 ,A_ : Optional[int]=True ,A_ : Tuple=True ,A_ : List[Any]=False ,A_ : List[str]=32 ,A_ : List[Any]=5 ,A_ : Tuple=4 ,A_ : Optional[int]=37 ,A_ : List[str]=8 ,A_ : List[Any]=0.1 ,A_ : List[Any]=0.0_02 ,A_ : List[Any]=1 ,A_ : int=0 ,A_ : Optional[int]=0 ,A_ : Tuple=None ,A_ : Any=None ,) -> Union[str, Any]: A = parent A = batch_size A = encoder_seq_length A = decoder_seq_length # For common tests A = self.decoder_seq_length A = is_training A = use_attention_mask A = use_labels A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = d_ff A = relative_attention_num_buckets A = dropout_rate A = initializer_factor A = eos_token_id A = pad_token_id A = decoder_start_token_id A = None A = decoder_layers def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: return TaConfig.from_pretrained('google/umt5-base' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[Any] ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : Any=None ,A_ : Dict=None ,A_ : List[Any]=None ,A_ : int=None ,A_ : List[str]=None ,) -> Optional[Any]: if attention_mask is None: A = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: A = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: A = torch.ones(config.num_hidden_layers ,config.num_attention_heads ,device=A_ ) if decoder_head_mask is None: A = torch.ones(config.num_decoder_layers ,config.num_attention_heads ,device=A_ ) if cross_attn_head_mask is None: A = torch.ones( config.num_decoder_layers ,config.num_attention_heads ,device=A_ ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: A = ids_tensor([self.batch_size, self.encoder_seq_length] ,self.vocab_size ) A = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input A = input_ids.clamp(self.pad_token_id + 1 ) A = decoder_input_ids.clamp(self.pad_token_id + 1 ) A = self.get_config() A = config.num_attention_heads A = self.prepare_inputs_dict(A_ ,A_ ,A_ ) return config, input_dict def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: A , A = self.prepare_config_and_inputs() return config, inputs_dict def 
_SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: return TaConfig( vocab_size=166 ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: return TaConfig( vocab_size=self.vocab_size ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : int ,A_ : Optional[int] ,A_ : Union[str, Any] ,A_ : Optional[Any] ,A_ : int ,A_ : Dict ,) -> Any: A = UMTaModel(config=A_ ) model.to(A_ ) model.eval() A = model( input_ids=A_ ,decoder_input_ids=A_ ,attention_mask=A_ ,decoder_attention_mask=A_ ,) A = model(input_ids=A_ ,decoder_input_ids=A_ ) A = result.last_hidden_state A = result.past_key_values A = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() ,(self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() ,(self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(A_ ) ,config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) ,4 ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : List[Any] ,A_ : List[str] ,A_ : str ,A_ : List[str] ,A_ : Tuple ,A_ : Union[str, Any] ,) -> Tuple: A = UMTaModel(config=A_ ).get_decoder().to(A_ ).eval() # first forward pass A = model(A_ ,use_cache=A_ ) A = model(A_ ) A = model(A_ ,use_cache=A_ ) self.parent.assertTrue(len(A_ ) == len(A_ ) ) self.parent.assertTrue(len(A_ ) == len(A_ ) + 1 ) A , A = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A = ids_tensor((self.batch_size, 1) ,config.vocab_size ) # append to next input_ids and A = torch.cat([input_ids, next_tokens] ,dim=-1 ) A = model(A_ )['last_hidden_state'] A = model(A_ ,past_key_values=A_ )['last_hidden_state'] # select random slice A = ids_tensor((1,) ,output_from_past.shape[-1] ).item() A = output_from_no_past[:, -1, random_slice_idx].detach() A = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A_ ,A_ ,atol=1e-3 ) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Tuple ,A_ : str ,) -> Optional[Any]: A = UMTaModel(config=A_ ).to(A_ ).half().eval() A = model(**A_ )['last_hidden_state'] self.parent.assertFalse(torch.isnan(A_ ).any().item() ) @require_torch class __lowercase ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: int = ( (UMTaModel, 
UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) _lowerCamelCase: Optional[int] = (UMTaForConditionalGeneration,) if is_torch_available() else () _lowerCamelCase: List[str] = ( { '''conversational''': UMTaForConditionalGeneration, '''feature-extraction''': UMTaModel, '''summarization''': UMTaForConditionalGeneration, '''text2text-generation''': UMTaForConditionalGeneration, '''translation''': UMTaForConditionalGeneration, '''question-answering''': UMTaForQuestionAnswering, } if is_torch_available() else {} ) _lowerCamelCase: str = True _lowerCamelCase: List[str] = False _lowerCamelCase: str = False _lowerCamelCase: Optional[int] = True _lowerCamelCase: Optional[Any] = True # The small UMT5 model needs higher percentages for CPU/MP tests _lowerCamelCase: Tuple = [0.8, 0.9] def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any: A = UMTaModelTester(self ) @unittest.skip('Test has a segmentation fault on torch 1.8.0' ) def _SCREAMING_SNAKE_CASE ( self : str ) -> int: A = self.model_tester.prepare_config_and_inputs() A = UMTaModel(config_and_inputs[0] ).to(A_ ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( A_ ,(config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) ,F'{tmpdirname}/t5_test.onnx' ,export_params=A_ ,opset_version=9 ,input_names=['input_ids', 'decoder_input_ids'] ,) @unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]: A = ['encoder_attentions', 'decoder_attentions', 'cross_attentions'] A = self.model_tester.prepare_config_and_inputs() A = config_and_inputs[0] A = UMTaForConditionalGeneration(A_ ).eval() model.to(A_ ) A = { 'head_mask': torch.zeros(config.num_layers ,config.num_heads ,device=A_ ), 'decoder_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=A_ ), 'cross_attn_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=A_ ), } for attn_name, (name, mask) in zip(A_ ,head_masking.items() ): A = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": A = torch.ones( config.num_decoder_layers ,config.num_heads ,device=A_ ) A = model.generate( config_and_inputs[1]['input_ids'] ,num_beams=1 ,max_length=3 ,output_attentions=A_ ,return_dict_in_generate=A_ ,**A_ ,) # We check the state of decoder_attentions and cross_attentions just from the last step A = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) ,0.0 ) @unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: pass @require_torch @require_sentencepiece @require_tokenizers class __lowercase ( unittest.TestCase ): '''simple docstring''' @slow @unittest.skip( 'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. 
Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: A = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' ,return_dict=A_ ).to(A_ ) A = AutoTokenizer.from_pretrained('google/umt5-small' ,use_fast=A_ ,legacy=A_ ) A = [ 'Bonjour monsieur <extra_id_0> bien <extra_id_1>.', 'No se como puedo <extra_id_0>.', 'This is the reason why we <extra_id_0> them.', 'The <extra_id_0> walks in <extra_id_1>, seats', 'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.', ] A = tokenizer(A_ ,return_tensors='pt' ,padding=A_ ).input_ids # fmt: off A = torch.tensor( [ [ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1], ] ) # fmt: on torch.testing.assert_allclose(A_ ,A_ ) A = model.generate(input_ids.to(A_ ) ) A = [ '<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>', '<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', '<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', '<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', '<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', ] A = tokenizer.batch_decode(A_ ) self.assertEqual(A_ ,A_ )
721
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _lowercase = { '''configuration_clip''': [ '''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPConfig''', '''CLIPOnnxConfig''', '''CLIPTextConfig''', '''CLIPVisionConfig''', ], '''processing_clip''': ['''CLIPProcessor'''], '''tokenization_clip''': ['''CLIPTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''CLIPTokenizerFast'''] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''CLIPFeatureExtractor'''] _lowercase = ['''CLIPImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPModel''', '''CLIPPreTrainedModel''', '''CLIPTextModel''', '''CLIPTextModelWithProjection''', '''CLIPVisionModel''', '''CLIPVisionModelWithProjection''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFCLIPModel''', '''TFCLIPPreTrainedModel''', '''TFCLIPTextModel''', '''TFCLIPVisionModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''FlaxCLIPModel''', '''FlaxCLIPPreTrainedModel''', '''FlaxCLIPTextModel''', '''FlaxCLIPTextPreTrainedModel''', '''FlaxCLIPVisionModel''', '''FlaxCLIPVisionPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
0
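The CLIP `__init__` row above routes every public name through transformers' `_LazyModule` so that importing the package does not eagerly import torch, TensorFlow, and Flax. The same effect can be sketched with PEP 562's module-level `__getattr__`; this is a simplified stand-in, not the library's actual implementation:

# lazy_pkg/__init__.py (sketch)
import importlib

_IMPORT_STRUCTURE = {
    'configuration_clip': ['CLIPConfig'],
    'modeling_clip': ['CLIPModel'],  # only importable if torch is installed
}
_NAME_TO_MODULE = {name: module for module, names in _IMPORT_STRUCTURE.items() for name in names}


def __getattr__(name):
    # The submodule is imported only on first attribute access, so `import lazy_pkg` stays cheap
    if name in _NAME_TO_MODULE:
        module = importlib.import_module(f'.{_NAME_TO_MODULE[name]}', __name__)
        return getattr(module, name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')


def __dir__():
    return sorted(_NAME_TO_MODULE)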
"""simple docstring""" import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def _snake_case ( snake_case__ : Dict ): A = os.path.join(args.tf_model_dir , 'parameters.json' ) A = json.loads(open(snake_case__ ).read() ) if not params: raise ValueError( F'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' ) if not args.output.endswith('.pt' ): A = args.output + '.pt' A = OrderedDict() with tf.device('/CPU:0' ): A = tf.train.load_checkpoint(args.tf_model_dir ) A = reader.get_variable_to_shape_map() for key_name in shapes.keys(): A = reader.get_tensor(snake_case__ ).astype(np.floataa ) if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ): continue if key_name.startswith('pasts/' ): if key_name.startswith('pasts/mlp' ): A = int(key_name[9] ) elif key_name.startswith('pasts/out' ): A = 8 A = 'model.sqout.%d.weight' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix A = torch.tensor(snake_case__ ) elif key_name.startswith('model/moe' ): A = int(key_name[9:].split('/' )[0] ) if key_name.endswith('/switch_gating/kernel' ): A = 'model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix A = torch.tensor(snake_case__ ) elif key_name.endswith('/softmlp/kernel' ): A = 'model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix A = torch.tensor(snake_case__ ) elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ): A = key_name[-9:-7] for i in range(16 ): A = 'model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer) A = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided A = torch.tensor(snake_case__ ) elif key_name.startswith('model/mlp' ): A = int(key_name[9:].split('/' )[0] ) if key_name.endswith('/p1/kernel' ): A = 'model.blocks.%d.feed_forward.mlp.wi.weight' % player A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix A = torch.tensor(snake_case__ ) elif key_name.endswith('/p1/bias' ): A = 'model.blocks.%d.feed_forward.mlp.wi.bias' % player A = vnp.copy() # same because it is one dimensional A = torch.tensor(snake_case__ ) elif key_name.endswith('/p2/kernel' ): A = 'model.blocks.%d.feed_forward.mlp.wo.weight' % player A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix A = torch.tensor(snake_case__ ) elif key_name.endswith('/p2/bias' ): A = 'model.blocks.%d.feed_forward.mlp.wo.bias' % player A = vnp.copy() # same because it is one dimensional A = torch.tensor(snake_case__ ) elif key_name.startswith('model/ln' ): A = int(key_name[8:].split('/' )[0] ) if key_name.endswith('/b' ): A = 'model.blocks.%d.feed_forward.norm.bias' % player A = vnp.copy() # same because it is one dimensional A = torch.tensor(snake_case__ ) elif key_name.endswith('/g' ): A = 'model.blocks.%d.feed_forward.norm.weight' % player A = vnp.copy() # same because it is one dimensional A = torch.tensor(snake_case__ ) elif key_name.startswith('model/att' ): A = int(key_name[9:].split('/' )[0] ) if key_name.endswith('/qkv/kernel' ): A = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum A = state[:, 0, :, :] A = state[:, 1, :, :] A = state[:, 2, :, :] A = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * 
state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix A = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix A = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix A = 'model.blocks.%d.self_attn.self_attn.q_proj.weight' % player A = torch.tensor(snake_case__ ) A = 'model.blocks.%d.self_attn.self_attn.k_proj.weight' % player A = torch.tensor(snake_case__ ) A = 'model.blocks.%d.self_attn.self_attn.v_proj.weight' % player A = torch.tensor(snake_case__ ) elif key_name.endswith('/o/kernel' ): A = 'model.blocks.%d.self_attn.self_attn.out_proj.weight' % player A = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix A = torch.tensor(snake_case__ ) elif key_name.startswith('model/an' ): A = int(key_name[8:].split('/' )[0] ) if key_name.endswith('/b' ): A = 'model.blocks.%d.self_attn.norm.bias' % player A = vnp.copy() # same because it is one dimensional A = torch.tensor(snake_case__ ) elif key_name.endswith('/g' ): A = 'model.blocks.%d.self_attn.norm.weight' % player A = vnp.copy() # same because it is one dimensional A = torch.tensor(snake_case__ ) elif ( key_name.startswith('model/wte' ) or key_name.startswith('model/wpe' ) or key_name.startswith('model/ete' ) ): A = {'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[ key_name[-3:] ] A = 'model.%s.weight' % nlayer A = vnp.copy() # same in embedded A = torch.tensor(snake_case__ ) if key_name.startswith('model/wte' ): A = 'lm_head.weight' A = vnp.copy() # same in embedded A = torch.tensor(snake_case__ ) elif key_name.startswith('model/wob' ): A = 'final_logits_bias' A = vnp.copy() # same in embedded A = state.reshape((1, -1) ) A = torch.tensor(snake_case__ ) elif key_name == "model/dense/kernel": A = 'model.last_project.weight' A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix A = torch.tensor(snake_case__ ) elif key_name == "model/dense_1/bias": A = 'model.last_project.bias' A = vnp.copy() # same because it is one dimensional A = torch.tensor(snake_case__ ) torch.save(snake_case__ , args.output ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser( description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''') parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''') _lowercase = parser.parse_args() convert_tf_gptsan_to_pt(args)
700
"""simple docstring""" import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Any ,A_ : Optional[Any] ,A_ : Tuple=13 ,A_ : Optional[Any]=7 ,A_ : Dict=True ,A_ : Optional[Any]=True ,A_ : str=True ,A_ : Union[str, Any]=True ,A_ : Optional[Any]=True ,A_ : Tuple=False ,A_ : Optional[int]=False ,A_ : str=False ,A_ : int=2 ,A_ : Union[str, Any]=99 ,A_ : int=0 ,A_ : Dict=32 ,A_ : List[str]=5 ,A_ : Any=4 ,A_ : str=0.1 ,A_ : Any=0.1 ,A_ : int=512 ,A_ : List[Any]=2 ,A_ : Union[str, Any]=0.02 ,A_ : Optional[Any]=2 ,A_ : List[str]=4 ,A_ : Optional[int]="last" ,A_ : str=True ,A_ : List[str]=None ,A_ : List[Any]=0 ,) -> int: A = parent A = batch_size A = seq_length A = is_training A = use_input_lengths A = use_token_type_ids A = use_labels A = gelu_activation A = sinusoidal_embeddings A = causal A = asm A = n_langs A = vocab_size A = n_special A = hidden_size A = num_hidden_layers A = num_attention_heads A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_sequence_label_size A = initializer_range A = num_labels A = num_choices A = summary_type A = use_proj A = scope A = bos_token_id def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = random_attention_mask([self.batch_size, self.seq_length] ) A = None if self.use_input_lengths: A = ( ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length A = None if self.use_token_type_ids: A = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A = ids_tensor([self.batch_size] ,2 ).float() A = ids_tensor([self.batch_size] ,self.num_choices ) A = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: return XLMConfig( vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : int ,A_ : Dict ,A_ : Optional[Any] ,A_ : Optional[Any] ,A_ : Any 
,A_ : List[str] ,A_ : Optional[int] ,) -> Tuple: A = XLMModel(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,lengths=A_ ,langs=A_ ) A = model(A_ ,langs=A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ,A_ : Union[str, Any] ,A_ : Any ,A_ : Any ,A_ : Any ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : List[str] ,A_ : List[str] ,) -> Union[str, Any]: A = XLMWithLMHeadModel(A_ ) model.to(A_ ) model.eval() A = model(A_ ,token_type_ids=A_ ,labels=A_ ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple ,A_ : Tuple ,A_ : str ,A_ : int ,A_ : str ,A_ : Optional[Any] ,A_ : Any ,A_ : Any ,A_ : Dict ,) -> List[str]: A = XLMForQuestionAnsweringSimple(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model(A_ ,start_positions=A_ ,end_positions=A_ ) A = outputs self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[int] ,A_ : Dict ,A_ : Optional[Any] ,A_ : List[Any] ,A_ : List[str] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : str ,A_ : Any ,) -> Optional[int]: A = XLMForQuestionAnswering(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model( A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,p_mask=A_ ,) A = model( A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,) ((A) , ) = result_with_labels.to_tuple() A = model(A_ ,start_positions=A_ ,end_positions=A_ ) ((A) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape ,() ) self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : Union[str, Any] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : str ,) -> List[Any]: A = XLMForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model(A_ ,labels=A_ ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : str ,A_ : Tuple ,A_ : List[str] ,A_ : Dict ,A_ : Dict ,A_ : Union[str, Any] ,A_ : Dict ,A_ : Any ,) -> Any: A = self.num_labels A = XLMForTokenClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : str ,A_ : int ,A_ : Tuple ,A_ : List[Any] ,A_ : List[str] ,A_ : List[str] ,A_ : Optional[Any] ,A_ : int ,) -> Tuple: A = self.num_choices A = XLMForMultipleChoice(config=A_ ) model.to(A_ ) model.eval() A = 
input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = model( A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) _lowerCamelCase: Dict = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _lowerCamelCase: Optional[Any] = ( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Tuple ,A_ : Any ,A_ : str ) -> Tuple: if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[int] ,A_ : Tuple ,A_ : str=False ) -> Dict: A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": A = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=A_ ) A = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=A_ ) return inputs_dict def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: A = XLMModelTester(self ) A = ConfigTester(self ,config_class=A_ ,emb_dim=37 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : List[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : Union[str, Any] ,A_ : List[Any]=False ,A_ : Tuple=1 ) -> List[Any]: self.assertIsInstance(A_ ,A_ ) self.assertListEqual( [isinstance(A_ ,A_ ) for iter_attentions in attentions] ,[True] * len(A_ ) ) self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(A_ ): # adds PAD dummy token A = min_length + idx + 1 A = min_length + idx + 1 A = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : Dict ,A_ : Optional[Any] ,A_ : Dict ,A_ : Tuple=False ,A_ : Optional[Any]=1 ) -> List[str]: self.assertIsInstance(A_ ,A_ ) self.assertListEqual( [isinstance(A_ ,A_ ) for iter_hidden_states in hidden_states] ,[True] * len(A_ ) ,) self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(A_ ): # adds PAD dummy token A = min_length + idx + 1 A = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(A_ ) ,) pass @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = XLMModel.from_pretrained(A_ ) 
self.assertIsNotNone(A_ ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: A = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' ) model.to(A_ ) A = torch.tensor([[14, 447]] ,dtype=torch.long ,device=A_ ) # the president A = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference A = model.generate(A_ ,do_sample=A_ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,A_ )
22
0
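Stripped of its GPTSAN-specific key remapping, the checkpoint converter row above reduces to one loop: read each TensorFlow variable, skip the Adam optimizer slots, transpose 2-D kernels, and store a torch tensor. A rough sketch under those assumptions; the generic key renaming at the end is mine:

from collections import OrderedDict

import numpy as np
import tensorflow as tf
import torch


def tf_checkpoint_to_state_dict(ckpt_dir: str) -> 'OrderedDict[str, torch.Tensor]':
    reader = tf.train.load_checkpoint(ckpt_dir)
    state_dict = OrderedDict()
    for key_name in reader.get_variable_to_shape_map():
        if key_name.endswith('/adam_m') or key_name.endswith('/adam_v'):
            continue  # skip optimizer slot variables
        array = reader.get_tensor(key_name).astype(np.float32)
        if key_name.endswith('/kernel') and array.ndim == 2:
            # TF stores Dense kernels as (in, out); torch nn.Linear expects (out, in)
            array = array.transpose([1, 0]).copy()
        state_dict[key_name.replace('/', '.')] = torch.tensor(array)
    return state_dict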
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging _lowercase = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Tuple = ['''pixel_values'''] def __init__( self : Union[str, Any] ,A_ : bool = True ,A_ : Dict[str, int] = None ,A_ : PILImageResampling = PILImageResampling.BICUBIC ,A_ : bool = True ,A_ : Dict[str, int] = None ,A_ : bool = True ,A_ : Union[int, float] = 1 / 255 ,A_ : bool = True ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : bool = True ,**A_ : Optional[Any] ,) -> None: super().__init__(**A_ ) A = size if size is not None else {'shortest_edge': 224} A = get_size_dict(A_ ,default_to_square=A_ ) A = crop_size if crop_size is not None else {'height': 224, 'width': 224} A = get_size_dict(A_ ,default_to_square=A_ ,param_name='crop_size' ) A = do_resize A = size A = resample A = do_center_crop A = crop_size A = do_rescale A = rescale_factor A = do_normalize A = image_mean if image_mean is not None else OPENAI_CLIP_MEAN A = image_std if image_std is not None else OPENAI_CLIP_STD A = do_convert_rgb def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : PILImageResampling = PILImageResampling.BICUBIC ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : Union[str, Any] ,) -> np.ndarray: A = get_size_dict(A_ ,default_to_square=A_ ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) A = get_resize_output_image_size(A_ ,size=size['shortest_edge'] ,default_to_square=A_ ) return resize(A_ ,size=A_ ,resample=A_ ,data_format=A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : str ,) -> np.ndarray: A = get_size_dict(A_ ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(A_ ,size=(size['height'], size['width']) ,data_format=A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : np.ndarray ,A_ : Union[int, float] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : Union[str, Any] ,) -> Union[str, Any]: return rescale(A_ ,scale=A_ ,data_format=A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : np.ndarray ,A_ : Union[float, List[float]] ,A_ : Union[float, List[float]] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : List[Any] ,) -> np.ndarray: return normalize(A_ ,mean=A_ ,std=A_ ,data_format=A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : ImageInput ,A_ : bool = None ,A_ : Dict[str, int] = None ,A_ : PILImageResampling = None ,A_ : bool = None ,A_ : int = None ,A_ : bool = None ,A_ : float = None ,A_ : bool = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : bool = None ,A_ : Optional[Union[str, TensorType]] = None ,A_ : Optional[ChannelDimension] = ChannelDimension.FIRST ,**A_ : Dict ,) -> PIL.Image.Image: A = do_resize if do_resize is not None else self.do_resize A = size if size is not None else self.size A = get_size_dict(A_ ,param_name='size' ,default_to_square=A_ ) A = resample if resample is not None else self.resample A = do_center_crop if do_center_crop is not None else self.do_center_crop A = crop_size if crop_size is not None else self.crop_size A = get_size_dict(A_ ,param_name='crop_size' ,default_to_square=A_ ) A = do_rescale if do_rescale is not None else self.do_rescale A = rescale_factor if rescale_factor is not None else self.rescale_factor A = do_normalize if do_normalize is not None else self.do_normalize A = image_mean if image_mean is not None else self.image_mean A = image_std if image_std is not None else self.image_std A = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb A = make_list_of_images(A_ ) if not valid_images(A_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # PIL RGBA images are converted to RGB if do_convert_rgb: A = [convert_to_rgb(A_ ) for image in images] # All transformations expect numpy arrays. A = [to_numpy_array(A_ ) for image in images] if do_resize: A = [self.resize(image=A_ ,size=A_ ,resample=A_ ) for image in images] if do_center_crop: A = [self.center_crop(image=A_ ,size=A_ ) for image in images] if do_rescale: A = [self.rescale(image=A_ ,scale=A_ ) for image in images] if do_normalize: A = [self.normalize(image=A_ ,mean=A_ ,std=A_ ) for image in images] A = [to_channel_dimension_format(A_ ,A_ ) for image in images] A = {'pixel_values': images} return BatchFeature(data=A_ ,tensor_type=A_ )
701
"""simple docstring""" from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput _lowercase = 8 def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int]=BITS ): A = x.device A = (x * 255).int().clamp(0 , 255 ) A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ ) A = rearrange(snake_case__ , 'd -> d 1 1' ) A = rearrange(snake_case__ , 'b c h w -> b c 1 h w' ) A = ((x & mask) != 0).float() A = rearrange(snake_case__ , 'b c d h w -> b (c d) h w' ) A = bits * 2 - 1 return bits def _snake_case ( snake_case__ : Any , snake_case__ : Any=BITS ): A = x.device A = (x > 0).int() A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ , dtype=torch.intaa ) A = rearrange(snake_case__ , 'd -> d 1 1' ) A = rearrange(snake_case__ , 'b (c d) h w -> b c d h w' , d=8 ) A = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' ) return (dec / 255).clamp(0.0 , 1.0 ) def _snake_case ( self : Optional[int] , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : float = 0.0 , snake_case__ : bool = True , snake_case__ : List[str]=None , snake_case__ : bool = True , ): if self.num_inference_steps is None: raise ValueError( 'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) A = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas A = self.alphas_cumprod[timestep] A = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod A = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" A = self.bit_scale if self.config.clip_sample: A = torch.clamp(snake_case__ , -scale , snake_case__ ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) A = self._get_variance(snake_case__ , snake_case__ ) A = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide A = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf A = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf A = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 A = model_output.device if torch.is_tensor(snake_case__ ) else 'cpu' A = torch.randn(model_output.shape , dtype=model_output.dtype , generator=snake_case__ ).to(snake_case__ ) A = self._get_variance(snake_case__ , snake_case__ ) ** 0.5 * eta * noise A = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ ) def _snake_case ( self : Dict , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : Tuple="epsilon" , snake_case__ : List[str]=None , snake_case__ : bool = True , ): A = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: A , A = torch.split(snake_case__ , sample.shape[1] , dim=1 ) else: A = None # 1. compute alphas, betas A = self.alphas_cumprod[t] A = self.alphas_cumprod[t - 1] if t > 0 else self.one A = 1 - alpha_prod_t A = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": A = model_output else: raise ValueError(F'Unsupported prediction_type {prediction_type}.' ) # 3. Clip "predicted x_0" A = self.bit_scale if self.config.clip_sample: A = torch.clamp(snake_case__ , -scale , snake_case__ ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t A = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise A = 0 if t > 0: A = torch.randn( model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=snake_case__ ).to(model_output.device ) A = (self._get_variance(snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise A = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Union[str, Any] ,A_ : UNetaDConditionModel ,A_ : Union[DDIMScheduler, DDPMScheduler] ,A_ : Optional[float] = 1.0 ,) -> Optional[int]: super().__init__() A = bit_scale A = ( ddim_bit_scheduler_step if isinstance(A_ ,A_ ) else ddpm_bit_scheduler_step ) self.register_modules(unet=A_ ,scheduler=A_ ) @torch.no_grad() def __call__( self : Tuple ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 50 ,A_ : Optional[torch.Generator] = None ,A_ : Optional[int] = 1 ,A_ : Optional[str] = "pil" ,A_ : bool = True ,**A_ : Optional[Any] ,) -> Union[Tuple, ImagePipelineOutput]: A = torch.randn( (batch_size, self.unet.config.in_channels, height, width) ,generator=A_ ,) A = decimal_to_bits(A_ ) * self.bit_scale A = latents.to(self.device ) self.scheduler.set_timesteps(A_ ) for t in self.progress_bar(self.scheduler.timesteps ): # predict the noise residual A = self.unet(A_ ,A_ ).sample # compute the previous noisy sample x_t -> x_t-1 A = self.scheduler.step(A_ ,A_ ,A_ ).prev_sample A = bits_to_decimal(A_ ) if output_type == "pil": A = self.numpy_to_pil(A_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=A_ )
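A self-contained sketch of the bit encoding the pipeline above relies on (eight +/-1 bit planes per channel), written with plain NumPy so the round trip can be checked without the scheduler machinery.

import numpy as np

def to_bits(x, bits=8):
    # x in [0, 1], shape (c, h, w) -> (c * bits, h, w) with values in {-1, +1}
    ints = np.clip((x * 255).astype(np.int32), 0, 255)
    mask = (2 ** np.arange(bits - 1, -1, -1, dtype=np.int32))[:, None, None]
    planes = ((ints[:, None] & mask) != 0).astype(np.float32)
    return planes.reshape(-1, *x.shape[1:]) * 2 - 1

def from_bits(b, bits=8):
    # invert the encoding: threshold at 0, weight each plane, sum, rescale
    planes = (b > 0).astype(np.int32).reshape(-1, bits, *b.shape[1:])
    mask = (2 ** np.arange(bits - 1, -1, -1, dtype=np.int32))[None, :, None, None]
    return (planes * mask).sum(axis=1) / 255.0

x = np.random.rand(3, 4, 4).astype(np.float32)
# truncation to int makes the round trip reproduce floor(x * 255) / 255
assert np.allclose(from_bits(to_bits(x)), np.floor(x * 255) / 255)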
22
0
"""simple docstring""" import warnings from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''nvidia/segformer-b0-finetuned-ade-512-512''': ( '''https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json''' ), # See all SegFormer models at https://huggingface.co/models?filter=segformer } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = '''segformer''' def __init__( self : Any ,A_ : List[Any]=3 ,A_ : str=4 ,A_ : Dict=[2, 2, 2, 2] ,A_ : str=[8, 4, 2, 1] ,A_ : List[Any]=[32, 64, 160, 256] ,A_ : Dict=[7, 3, 3, 3] ,A_ : Optional[int]=[4, 2, 2, 2] ,A_ : int=[1, 2, 5, 8] ,A_ : List[str]=[4, 4, 4, 4] ,A_ : int="gelu" ,A_ : Optional[int]=0.0 ,A_ : str=0.0 ,A_ : Any=0.1 ,A_ : Dict=0.02 ,A_ : List[str]=0.1 ,A_ : List[str]=1e-6 ,A_ : Tuple=256 ,A_ : List[Any]=255 ,**A_ : Any ,) -> Any: super().__init__(**A_ ) if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False: warnings.warn( 'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be' ' removed, as the behaviour will default to that of reshape_last_stage = True.' ,A_ ,) A = num_channels A = num_encoder_blocks A = depths A = sr_ratios A = hidden_sizes A = patch_sizes A = strides A = mlp_ratios A = num_attention_heads A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = classifier_dropout_prob A = initializer_range A = drop_path_rate A = layer_norm_eps A = decoder_hidden_size A = kwargs.get('reshape_last_stage' ,A_ ) A = semantic_loss_ignore_index class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> float: return 1e-4 @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: return 12
702
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = '''yolos''' def __init__( self : Dict ,A_ : Optional[Any]=768 ,A_ : int=12 ,A_ : List[str]=12 ,A_ : str=3072 ,A_ : Tuple="gelu" ,A_ : Dict=0.0 ,A_ : List[Any]=0.0 ,A_ : Any=0.02 ,A_ : str=1e-12 ,A_ : List[Any]=[512, 864] ,A_ : Union[str, Any]=16 ,A_ : List[str]=3 ,A_ : Optional[int]=True ,A_ : Tuple=100 ,A_ : str=True ,A_ : Optional[Any]=False ,A_ : Any=1 ,A_ : Optional[Any]=5 ,A_ : Optional[Any]=2 ,A_ : Optional[int]=5 ,A_ : List[Any]=2 ,A_ : Union[str, Any]=0.1 ,**A_ : Tuple ,) -> Any: super().__init__(**A_ ) A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = patch_size A = num_channels A = qkv_bias A = num_detection_tokens A = use_mid_position_embeddings A = auxiliary_loss # Hungarian matcher A = class_cost A = bbox_cost A = giou_cost # Loss coefficients A = bbox_loss_coefficient A = giou_loss_coefficient A = eos_coefficient class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Any = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> float: return 1e-4 @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: return 12
22
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''tanreinama/GPTSAN-2.8B-spout_is_uniform''': ( '''https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json''' ), } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: str = '''gptsan-japanese''' _lowerCamelCase: Dict = [ '''past_key_values''', ] _lowerCamelCase: Dict = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : Dict ,A_ : Any=3_6000 ,A_ : Dict=1280 ,A_ : Any=1024 ,A_ : Optional[int]=8192 ,A_ : int=4096 ,A_ : Any=128 ,A_ : Union[str, Any]=10 ,A_ : str=0 ,A_ : Union[str, Any]=16 ,A_ : str=16 ,A_ : str=128 ,A_ : Any=0.0 ,A_ : Tuple=1e-5 ,A_ : int=False ,A_ : Optional[int]=0.0 ,A_ : int="float32" ,A_ : List[Any]=False ,A_ : Union[str, Any]=False ,A_ : Optional[Any]=False ,A_ : str=0.0_02 ,A_ : Optional[int]=False ,A_ : List[str]=True ,A_ : Optional[int]=3_5998 ,A_ : Dict=3_5995 ,A_ : str=3_5999 ,**A_ : List[Any] ,) -> Optional[int]: A = vocab_size A = max_position_embeddings A = d_model A = d_ff A = d_ext A = d_spout A = num_switch_layers A = num_ext_layers A = num_switch_layers + num_ext_layers A = num_heads A = num_experts A = expert_capacity A = dropout_rate A = layer_norm_epsilon A = router_bias A = router_jitter_noise A = router_dtype A = router_ignore_padding_tokens A = output_hidden_states A = output_attentions A = initializer_factor A = output_router_logits A = use_cache super().__init__( separator_token_id=A_ ,pad_token_id=A_ ,eos_token_id=A_ ,**A_ ,)
703
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Tuple = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=0 ) -> Dict: A = floats_tensor((1, 3, 128, 128) ,rng=random.Random(A_ ) ) A = np.random.RandomState(A_ ) A = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.75, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) A = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=A_ ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) # warmup pass to apply optimizations A = pipe(**self.get_dummy_inputs() ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint 
,provider='CPUExecutionProvider' ) A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str: A = ort.SessionOptions() A = False return options def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) # using the PNDM scheduler by default A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) A = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' ) A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=A_ ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] ) # TODO: lower the tolerance 
after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
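The tests above all share one assertion idiom; a small sketch of it in isolation, with a zero image standing in for real pipeline output.

import numpy as np

def assert_slice_close(image, expected, tol=1e-1):
    # image: (batch, height, width, channels); compare the bottom-right 3x3
    # patch of the last channel of the first image against a recorded slice
    actual = image[0, -3:, -3:, -1].flatten()
    assert np.abs(actual - expected).max() < tol

assert_slice_close(np.zeros((1, 128, 128, 3)), np.zeros(9))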
22
0
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer _lowercase = logging.get_logger(__name__) _lowercase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} _lowercase = { '''vocab_file''': { '''squeezebert/squeezebert-uncased''': ( '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt''' ), '''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''', '''squeezebert/squeezebert-mnli-headless''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''squeezebert/squeezebert-uncased''': ( '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json''' ), '''squeezebert/squeezebert-mnli''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json''' ), '''squeezebert/squeezebert-mnli-headless''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json''' ), }, } _lowercase = { '''squeezebert/squeezebert-uncased''': 5_12, '''squeezebert/squeezebert-mnli''': 5_12, '''squeezebert/squeezebert-mnli-headless''': 5_12, } _lowercase = { '''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True}, '''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True}, '''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True}, } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[Any] = VOCAB_FILES_NAMES _lowerCamelCase: Tuple = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase: str = PRETRAINED_INIT_CONFIGURATION _lowerCamelCase: Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase: List[str] = SqueezeBertTokenizer def __init__( self : Tuple ,A_ : List[Any]=None ,A_ : int=None ,A_ : Optional[Any]=True ,A_ : Optional[Any]="[UNK]" ,A_ : List[Any]="[SEP]" ,A_ : List[str]="[PAD]" ,A_ : Dict="[CLS]" ,A_ : int="[MASK]" ,A_ : str=True ,A_ : Any=None ,**A_ : int ,) -> str: super().__init__( A_ ,tokenizer_file=A_ ,do_lower_case=A_ ,unk_token=A_ ,sep_token=A_ ,pad_token=A_ ,cls_token=A_ ,mask_token=A_ ,tokenize_chinese_chars=A_ ,strip_accents=A_ ,**A_ ,) A = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' ,A_ ) != do_lower_case or normalizer_state.get('strip_accents' ,A_ ) != strip_accents or normalizer_state.get('handle_chinese_chars' ,A_ ) != tokenize_chinese_chars ): A = getattr(A_ ,normalizer_state.pop('type' ) ) A = do_lower_case A = strip_accents A = tokenize_chinese_chars A = normalizer_class(**A_ ) A = do_lower_case def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Any ,A_ : Dict=None ) -> Any: A = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: A = [self.sep_token_id] A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]: A = self._tokenizer.model.save(A_ ,name=A_ ) return tuple(A_ )
704
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch _lowercase = logging.get_logger(__name__) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Dict = ['''pixel_values'''] def __init__( self : Optional[Any] ,A_ : bool = True ,A_ : Optional[Dict[str, int]] = None ,A_ : PILImageResampling = PILImageResampling.BILINEAR ,A_ : bool = True ,A_ : Dict[str, int] = None ,A_ : bool = True ,A_ : Union[int, float] = 1 / 255 ,A_ : bool = True ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,**A_ : Optional[Any] ,) -> None: super().__init__(**A_ ) A = size if size is not None else {'shortest_edge': 256} A = get_size_dict(A_ ,default_to_square=A_ ) A = crop_size if crop_size is not None else {'height': 224, 'width': 224} A = get_size_dict(A_ ,param_name='crop_size' ) A = do_resize A = size A = resample A = do_center_crop A = crop_size A = do_rescale A = rescale_factor A = do_normalize A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A = image_std if image_std is not None else IMAGENET_STANDARD_STD def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : PILImageResampling = PILImageResampling.BICUBIC ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray: A = get_size_dict(A_ ,default_to_square=A_ ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) A = get_resize_output_image_size(A_ ,size=size['shortest_edge'] ,default_to_square=A_ ) return resize(A_ ,size=A_ ,resample=A_ ,data_format=A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray: A = get_size_dict(A_ ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}' ) return center_crop(A_ ,size=(size['height'], size['width']) ,data_format=A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : float ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : List[str] ) -> np.ndarray: return rescale(A_ ,scale=A_ ,data_format=A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : np.ndarray ,A_ : Union[float, List[float]] ,A_ : Union[float, List[float]] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : Any ,) -> np.ndarray: return normalize(A_ ,mean=A_ ,std=A_ ,data_format=A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : ImageInput ,A_ : Optional[bool] = None ,A_ : Dict[str, int] = None ,A_ : PILImageResampling = None ,A_ : bool = None ,A_ : Dict[str, int] = None ,A_ : Optional[bool] = None ,A_ : Optional[float] = None ,A_ : Optional[bool] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[str, TensorType]] = None ,A_ : Union[str, ChannelDimension] = ChannelDimension.FIRST ,**A_ : Tuple ,) -> List[Any]: A = do_resize if do_resize is not None else self.do_resize A = size if size is not None else self.size A = get_size_dict(A_ ,default_to_square=A_ ) A = resample if resample is not None else self.resample A = do_center_crop if do_center_crop is not None else self.do_center_crop A = crop_size if crop_size is not None else self.crop_size A = get_size_dict(A_ ,param_name='crop_size' ) A = do_rescale if do_rescale is not None else self.do_rescale A = rescale_factor if rescale_factor is not None else self.rescale_factor A = do_normalize if do_normalize is not None else self.do_normalize A = image_mean if image_mean is not None else self.image_mean A = image_std if image_std is not None else self.image_std A = make_list_of_images(A_ ) if not valid_images(A_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
A = [to_numpy_array(A_ ) for image in images] if do_resize: A = [self.resize(image=A_ ,size=A_ ,resample=A_ ) for image in images] if do_center_crop: A = [self.center_crop(image=A_ ,size=A_ ) for image in images] if do_rescale: A = [self.rescale(image=A_ ,scale=A_ ) for image in images] if do_normalize: A = [self.normalize(image=A_ ,mean=A_ ,std=A_ ) for image in images] A = [to_channel_dimension_format(A_ ,A_ ) for image in images] A = {'pixel_values': images} return BatchFeature(data=A_ ,tensor_type=A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : List[Tuple] = None ) -> str: A = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(A_ ) != len(A_ ): raise ValueError( 'Make sure that you pass in as many target sizes as the batch dimension of the logits' ) if is_torch_tensor(A_ ): A = target_sizes.numpy() A = [] for idx in range(len(A_ ) ): A = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='bilinear' ,align_corners=A_ ) A = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(A_ ) else: A = logits.argmax(dim=1 ) A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
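A sketch of the post-processing step at the end of the class above: bilinearly upsample each image's logits to its target size, then argmax over the class dimension to get a label map.

import torch

def post_process(logits, target_sizes):
    # logits: (batch, num_classes, h, w); target_sizes: list of (height, width)
    maps = []
    for i, (h, w) in enumerate(target_sizes):
        up = torch.nn.functional.interpolate(
            logits[i].unsqueeze(0), size=(h, w), mode="bilinear", align_corners=False
        )
        maps.append(up[0].argmax(dim=0))  # (h, w) integer label map
    return maps

seg = post_process(torch.randn(2, 19, 32, 32), [(64, 64), (48, 80)])
assert seg[0].shape == (64, 64) and seg[1].shape == (48, 80)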
22
0
"""simple docstring""" import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def _snake_case ( snake_case__ : Optional[int] , snake_case__ : List[Any] ): A = checkpoint A = {} A = vae_state_dict['encoder.conv_in.weight'] A = vae_state_dict['encoder.conv_in.bias'] A = vae_state_dict['encoder.conv_out.weight'] A = vae_state_dict['encoder.conv_out.bias'] A = vae_state_dict['encoder.norm_out.weight'] A = vae_state_dict['encoder.norm_out.bias'] A = vae_state_dict['decoder.conv_in.weight'] A = vae_state_dict['decoder.conv_in.bias'] A = vae_state_dict['decoder.conv_out.weight'] A = vae_state_dict['decoder.conv_out.bias'] A = vae_state_dict['decoder.norm_out.weight'] A = vae_state_dict['decoder.norm_out.bias'] A = vae_state_dict['quant_conv.weight'] A = vae_state_dict['quant_conv.bias'] A = vae_state_dict['post_quant_conv.weight'] A = vae_state_dict['post_quant_conv.bias'] # Retrieves the keys for the encoder down blocks only A = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'encoder.down' in layer} ) A = { layer_id: [key for key in vae_state_dict if F'down.{layer_id}' in key] for layer_id in range(snake_case__ ) } # Retrieves the keys for the decoder up blocks only A = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'decoder.up' in layer} ) A = { layer_id: [key for key in vae_state_dict if F'up.{layer_id}' in key] for layer_id in range(snake_case__ ) } for i in range(snake_case__ ): A = [key for key in down_blocks[i] if F'down.{i}' in key and F'down.{i}.downsample' not in key] if F'encoder.down.{i}.downsample.conv.weight' in vae_state_dict: A = vae_state_dict.pop( F'encoder.down.{i}.downsample.conv.weight' ) A = vae_state_dict.pop( F'encoder.down.{i}.downsample.conv.bias' ) A = renew_vae_resnet_paths(snake_case__ ) A = {'old': F'down.{i}.block', 'new': F'down_blocks.{i}.resnets'} assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ ) A = [key for key in vae_state_dict if 'encoder.mid.block' in key] A = 2 for i in range(1 , num_mid_res_blocks + 1 ): A = [key for key in mid_resnets if F'encoder.mid.block_{i}' in key] A = renew_vae_resnet_paths(snake_case__ ) A = {'old': F'mid.block_{i}', 'new': F'mid_block.resnets.{i - 1}'} assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ ) A = [key for key in vae_state_dict if 'encoder.mid.attn' in key] A = renew_vae_attention_paths(snake_case__ ) A = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'} assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ ) conv_attn_to_linear(snake_case__ ) for i in range(snake_case__ ): A = num_up_blocks - 1 - i A = [ key for key in up_blocks[block_id] if F'up.{block_id}' in key and F'up.{block_id}.upsample' not in key ] if F'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict: A = vae_state_dict[ F'decoder.up.{block_id}.upsample.conv.weight' ] A = vae_state_dict[ F'decoder.up.{block_id}.upsample.conv.bias' ] A = renew_vae_resnet_paths(snake_case__ ) A = {'old': F'up.{block_id}.block', 'new': F'up_blocks.{i}.resnets'} assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , 
additional_replacements=[meta_path] , config=snake_case__ ) A = [key for key in vae_state_dict if 'decoder.mid.block' in key] A = 2 for i in range(1 , num_mid_res_blocks + 1 ): A = [key for key in mid_resnets if F'decoder.mid.block_{i}' in key] A = renew_vae_resnet_paths(snake_case__ ) A = {'old': F'mid.block_{i}', 'new': F'mid_block.resnets.{i - 1}'} assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ ) A = [key for key in vae_state_dict if 'decoder.mid.attn' in key] A = renew_vae_attention_paths(snake_case__ ) A = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'} assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ ) conv_attn_to_linear(snake_case__ ) return new_checkpoint def _snake_case ( snake_case__ : str , snake_case__ : str , ): # Only support V1 A = requests.get( ' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' ) A = io.BytesIO(r.content ) A = OmegaConf.load(snake_case__ ) A = 512 A = 'cuda' if torch.cuda.is_available() else 'cpu' if checkpoint_path.endswith('safetensors' ): from safetensors import safe_open A = {} with safe_open(snake_case__ , framework='pt' , device='cpu' ) as f: for key in f.keys(): A = f.get_tensor(snake_case__ ) else: A = torch.load(snake_case__ , map_location=snake_case__ )['state_dict'] # Convert the VAE model. A = create_vae_diffusers_config(snake_case__ , image_size=snake_case__ ) A = custom_convert_ldm_vae_checkpoint(snake_case__ , snake_case__ ) A = AutoencoderKL(**snake_case__ ) vae.load_state_dict(snake_case__ ) vae.save_pretrained(snake_case__ ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""") parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""") _lowercase = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
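The converter above is essentially repeated key renaming over a flat state dict; a minimal sketch of that step (the key names here are illustrative, not the full VAE layout).

def rename_keys(state_dict, replacements):
    # apply every {"old": ..., "new": ...} substitution to each checkpoint key
    out = {}
    for key, value in state_dict.items():
        new_key = key
        for meta in replacements:
            new_key = new_key.replace(meta["old"], meta["new"])
        out[new_key] = value
    return out

sd = {"down.0.block.0.conv.weight": 0}
renamed = rename_keys(sd, [{"old": "down.0.block", "new": "down_blocks.0.resnets"}])
assert "down_blocks.0.resnets.0.conv.weight" in renamed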
705
"""simple docstring""" import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 _lowercase = data_utils.TransfoXLTokenizer _lowercase = data_utils.TransfoXLCorpus _lowercase = data_utils _lowercase = data_utils def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : int ): if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(snake_case__ , 'rb' ) as fp: A = pickle.load(snake_case__ , encoding='latin1' ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) A = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file'] print(F'Save vocabulary to {pytorch_vocab_dump_path}' ) A = corpus.vocab.__dict__ torch.save(snake_case__ , snake_case__ ) A = corpus.__dict__ corpus_dict_no_vocab.pop('vocab' , snake_case__ ) A = pytorch_dump_folder_path + '/' + CORPUS_NAME print(F'Save dataset to {pytorch_dataset_dump_path}' ) torch.save(snake_case__ , snake_case__ ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model A = os.path.abspath(snake_case__ ) A = os.path.abspath(snake_case__ ) print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' ) # Initialise PyTorch model if transfo_xl_config_file == "": A = TransfoXLConfig() else: A = TransfoXLConfig.from_json_file(snake_case__ ) print(F'Building PyTorch model from configuration: {config}' ) A = TransfoXLLMHeadModel(snake_case__ ) A = load_tf_weights_in_transfo_xl(snake_case__ , snake_case__ , snake_case__ ) # Save pytorch-model A = os.path.join(snake_case__ , snake_case__ ) A = os.path.join(snake_case__ , snake_case__ ) print(F'Save PyTorch model to {os.path.abspath(snake_case__ )}' ) torch.save(model.state_dict() , snake_case__ ) print(F'Save configuration file to {os.path.abspath(snake_case__ )}' ) with open(snake_case__ , 'w' , encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the folder to store the PyTorch model or dataset/vocab.''', ) parser.add_argument( '''--tf_checkpoint_path''', default='''''', type=str, help='''An optional path to a TensorFlow checkpoint path to be converted.''', ) parser.add_argument( '''--transfo_xl_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained BERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--transfo_xl_dataset_file''', default='''''', type=str, help='''An optional dataset file to be converted in a vocabulary.''', ) _lowercase = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
22
0
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Union[str, Any] = '''sew-d''' def __init__( self : List[str] ,A_ : Dict=32 ,A_ : Optional[int]=768 ,A_ : Optional[Any]=12 ,A_ : Optional[int]=12 ,A_ : Tuple=3072 ,A_ : Any=2 ,A_ : List[Any]=512 ,A_ : Tuple=256 ,A_ : Optional[int]=True ,A_ : List[str]=True ,A_ : List[Any]=("p2c", "c2p") ,A_ : Dict="layer_norm" ,A_ : List[str]="gelu_python" ,A_ : int=0.1 ,A_ : Tuple=0.1 ,A_ : Dict=0.1 ,A_ : List[str]=0.0 ,A_ : List[str]=0.1 ,A_ : Optional[Any]=0.02 ,A_ : Optional[int]=1e-7 ,A_ : Optional[Any]=1e-5 ,A_ : Optional[int]="group" ,A_ : Union[str, Any]="gelu" ,A_ : Tuple=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) ,A_ : Dict=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) ,A_ : int=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) ,A_ : Union[str, Any]=False ,A_ : str=128 ,A_ : Tuple=16 ,A_ : List[Any]=True ,A_ : Tuple=0.05 ,A_ : List[Any]=10 ,A_ : List[str]=2 ,A_ : str=0.0 ,A_ : Union[str, Any]=10 ,A_ : Tuple=0 ,A_ : Tuple="mean" ,A_ : Union[str, Any]=False ,A_ : Dict=False ,A_ : str=256 ,A_ : int=0 ,A_ : Dict=1 ,A_ : List[str]=2 ,**A_ : Tuple ,) -> Any: super().__init__(**A_ ,pad_token_id=A_ ,bos_token_id=A_ ,eos_token_id=A_ ) A = hidden_size A = feat_extract_norm A = feat_extract_activation A = list(A_ ) A = list(A_ ) A = list(A_ ) A = conv_bias A = num_conv_pos_embeddings A = num_conv_pos_embedding_groups A = len(self.conv_dim ) A = num_hidden_layers A = intermediate_size A = squeeze_factor A = max_position_embeddings A = position_buckets A = share_att_key A = relative_attention A = norm_rel_ebd A = list(A_ ) A = hidden_act A = num_attention_heads A = hidden_dropout A = attention_dropout A = activation_dropout A = feat_proj_dropout A = final_dropout A = layer_norm_eps A = feature_layer_norm_eps A = initializer_range A = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect.' 'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,' F'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)' F'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A = apply_spec_augment A = mask_time_prob A = mask_time_length A = mask_time_min_masks A = mask_feature_prob A = mask_feature_length A = mask_feature_min_masks # ctc loss A = ctc_loss_reduction A = ctc_zero_infinity # sequence classification A = use_weighted_layer_sum A = classifier_proj_size @property def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: return functools.reduce(operator.mul ,self.conv_stride ,1 )
706
"""simple docstring""" from collections import deque from math import floor from random import random from time import time class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Dict ) -> int: A = {} def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : Optional[Any]=1 ) -> int: if self.graph.get(A_ ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: A = [[w, v]] if not self.graph.get(A_ ): A = [] def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: return list(self.graph ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Dict ) -> Optional[Any]: if self.graph.get(A_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int=-2 ,A_ : Dict=-1 ) -> str: if s == d: return [] A = [] A = [] if s == -2: A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(A_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = ss # check if se have reached the starting point if len(A_ ) == 0: return visited def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Any=-1 ) -> int: if c == -1: A = floor(random() * 1_0000 ) + 10 for i in range(A_ ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): A = floor(random() * c ) + 1 if n != i: self.add_pair(A_ ,A_ ,1 ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Union[str, Any]=-2 ) -> Optional[Any]: A = deque() A = [] if s == -2: A = list(self.graph )[0] d.append(A_ ) visited.append(A_ ) while d: A = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple ) -> Any: A = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Union[str, Any] ) -> str: return len(self.graph[u] ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any]=-2 ) -> Any: A = [] A = [] if s == -2: A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = s A = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = ss # check if se have reached the starting point if len(A_ ) == 0: return sorted_nodes def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = [] A = [] A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = -2 A = [] A = s A = False A = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A = len(A_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 
1: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() A = True if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = False indirect_parents.append(A_ ) A = s A = ss # check if se have reached the starting point if len(A_ ) == 0: return list(A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: A = [] A = [] A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = -2 A = [] A = s A = False A = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A = len(A_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() A = True if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = False indirect_parents.append(A_ ) A = s A = ss # check if se have reached the starting point if len(A_ ) == 0: return False def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple=-2 ,A_ : List[str]=-1 ) -> str: A = time() self.dfs(A_ ,A_ ) A = time() return end - begin def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any]=-2 ) -> Dict: A = time() self.bfs(A_ ) A = time() return end - begin class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] ) -> Tuple: A = {} def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[str]=1 ) -> Dict: # check if the u exists if self.graph.get(A_ ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist A = [[w, v]] # add the other way if self.graph.get(A_ ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist A = [[w, u]] def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : List[str] ) -> List[Any]: if self.graph.get(A_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(A_ ) # the other way round if self.graph.get(A_ ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=-2 ,A_ : List[Any]=-1 ) -> int: if s == d: return [] A = [] A = [] if s == -2: A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(A_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = ss # check if se have reached the starting point if len(A_ ) == 0: return visited def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int]=-1 ) -> List[Any]: if c == -1: A = floor(random() * 1_0000 ) + 10 for i in range(A_ ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): A = floor(random() * c ) + 1 if n != i: self.add_pair(A_ ,A_ ,1 ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict=-2 ) -> List[Any]: A = deque() A = [] if s == -2: A = list(self.graph )[0] d.append(A_ ) visited.append(A_ ) while d: A = d.popleft() if 
len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[Any] ) -> List[Any]: return len(self.graph[u] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: A = [] A = [] A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = -2 A = [] A = s A = False A = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A = len(A_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() A = True if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = False indirect_parents.append(A_ ) A = s A = ss # check if se have reached the starting point if len(A_ ) == 0: return list(A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: A = [] A = [] A = list(self.graph )[0] stack.append(A_ ) visited.append(A_ ) A = -2 A = [] A = s A = False A = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A = len(A_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A = node[1] break # check if all the children are visited if s == ss: stack.pop() A = True if len(A_ ) != 0: A = stack[len(A_ ) - 1] else: A = False indirect_parents.append(A_ ) A = s A = ss # check if se have reached the starting point if len(A_ ) == 0: return False def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: return list(self.graph ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any]=-2 ,A_ : List[str]=-1 ) -> Any: A = time() self.dfs(A_ ,A_ ) A = time() return end - begin def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any]=-2 ) -> Union[str, Any]: A = time() self.bfs(A_ ) A = time() return end - begin
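A compact sketch of the iterative DFS/BFS implemented by the two classes above, on the same adjacency format (lists of [weight, vertex] pairs); the example graph is illustrative.

from collections import deque

graph = {0: [[1, 1], [1, 2]], 1: [[1, 3]], 2: [[1, 3]], 3: []}

def dfs(start):
    visited, stack = [], [start]
    while stack:
        node = stack.pop()
        if node not in visited:
            visited.append(node)
            # push neighbors in reverse so they pop in insertion order
            stack.extend(v for _, v in reversed(graph[node]) if v not in visited)
    return visited

def bfs(start):
    visited, queue = [start], deque([start])
    while queue:
        node = queue.popleft()
        for _, v in graph[node]:
            if v not in visited:
                visited.append(v)
                queue.append(v)
    return visited

assert dfs(0) == [0, 1, 3, 2] and bfs(0) == [0, 1, 2, 3]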
22
0
"""simple docstring""" import requests from bsa import BeautifulSoup def _snake_case ( snake_case__ : str = "AAPL" ): A = F'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}' A = BeautifulSoup(requests.get(snake_case__ ).text , 'html.parser' ) A = 'My(6px) Pos(r) smartphone_Mt(6px)' return soup.find('div' , class_=class_ ).find('span' ).text if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
707
"""simple docstring""" from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def _snake_case ( snake_case__ : str = "isbn/0140328726" ): A = olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes if new_olid.count('/' ) != 1: A = F'{olid} is not a valid Open Library olid' raise ValueError(snake_case__ ) return requests.get(F'https://openlibrary.org/{new_olid}.json' ).json() def _snake_case ( snake_case__ : dict ): A = { 'title': 'Title', 'publish_date': 'Publish date', 'authors': 'Authors', 'number_of_pages': 'Number of pages:', 'first_sentence': 'First sentence', 'isbn_10': 'ISBN (10)', 'isbn_13': 'ISBN (13)', } A = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} A = [ get_openlibrary_data(author['key'] )['name'] for author in data['Authors'] ] A = data['First sentence']['value'] for key, value in data.items(): if isinstance(snake_case__ , snake_case__ ): A = ', '.join(snake_case__ ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: _lowercase = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""") continue print(F"""\nSearching Open Library for ISBN: {isbn}...\n""") try: _lowercase = summarize_book(get_openlibrary_data(F"""isbn/{isbn}""")) print('''\n'''.join(F"""{key}: {value}""" for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(F"""Sorry, there are no results for ISBN: {isbn}.""")
22
0
"""simple docstring""" _lowercase = 9.80_665 def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float = g ): if fluid_density <= 0: raise ValueError('Impossible fluid density' ) if volume < 0: raise ValueError('Impossible Object volume' ) if gravity <= 0: raise ValueError('Impossible Gravity' ) return fluid_density * gravity * volume if __name__ == "__main__": import doctest # run doctest doctest.testmod()
708
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) _lowercase = { '''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''], '''tokenization_perceiver''': ['''PerceiverTokenizer'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''PerceiverFeatureExtractor'''] _lowercase = ['''PerceiverImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PerceiverForImageClassificationConvProcessing''', '''PerceiverForImageClassificationFourier''', '''PerceiverForImageClassificationLearned''', '''PerceiverForMaskedLM''', '''PerceiverForMultimodalAutoencoding''', '''PerceiverForOpticalFlow''', '''PerceiverForSequenceClassification''', '''PerceiverLayer''', '''PerceiverModel''', '''PerceiverPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
0
from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput _lowercase = 8 def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int]=BITS ): A = x.device A = (x * 255).int().clamp(0 , 255 ) A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ ) A = rearrange(snake_case__ , 'd -> d 1 1' ) A = rearrange(snake_case__ , 'b c h w -> b c 1 h w' ) A = ((x & mask) != 0).float() A = rearrange(snake_case__ , 'b c d h w -> b (c d) h w' ) A = bits * 2 - 1 return bits def _snake_case ( snake_case__ : Any , snake_case__ : Any=BITS ): A = x.device A = (x > 0).int() A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ , dtype=torch.intaa ) A = rearrange(snake_case__ , 'd -> d 1 1' ) A = rearrange(snake_case__ , 'b (c d) h w -> b c d h w' , d=8 ) A = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' ) return (dec / 255).clamp(0.0 , 1.0 ) def _snake_case ( self : Optional[int] , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : float = 0.0 , snake_case__ : bool = True , snake_case__ : List[str]=None , snake_case__ : bool = True , ): if self.num_inference_steps is None: raise ValueError( 'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) A = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas A = self.alphas_cumprod[timestep] A = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod A = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" A = self.bit_scale if self.config.clip_sample: A = torch.clamp(snake_case__ , -scale , snake_case__ ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) A = self._get_variance(snake_case__ , snake_case__ ) A = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide A = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf A = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf A = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 A = model_output.device if torch.is_tensor(snake_case__ ) else 'cpu' A = torch.randn(model_output.shape , dtype=model_output.dtype , generator=snake_case__ ).to(snake_case__ ) A = self._get_variance(snake_case__ , snake_case__ ) ** 0.5 * eta * noise A = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ ) def _snake_case ( self : Dict , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : Tuple="epsilon" , snake_case__ : List[str]=None , snake_case__ : bool = True , ): A = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: A , A = torch.split(snake_case__ , sample.shape[1] , dim=1 ) else: A = None # 1. compute alphas, betas A = self.alphas_cumprod[t] A = self.alphas_cumprod[t - 1] if t > 0 else self.one A = 1 - alpha_prod_t A = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": A = model_output else: raise ValueError(F'Unsupported prediction_type {prediction_type}.' ) # 3. Clip "predicted x_0" A = self.bit_scale if self.config.clip_sample: A = torch.clamp(snake_case__ , -scale , snake_case__ ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t A = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise A = 0 if t > 0: A = torch.randn( model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=snake_case__ ).to(model_output.device ) A = (self._get_variance(snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise A = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Union[str, Any] ,A_ : UNetaDConditionModel ,A_ : Union[DDIMScheduler, DDPMScheduler] ,A_ : Optional[float] = 1.0 ,) -> Optional[int]: super().__init__() A = bit_scale A = ( ddim_bit_scheduler_step if isinstance(A_ ,A_ ) else ddpm_bit_scheduler_step ) self.register_modules(unet=A_ ,scheduler=A_ ) @torch.no_grad() def __call__( self : Tuple ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 50 ,A_ : Optional[torch.Generator] = None ,A_ : Optional[int] = 1 ,A_ : Optional[str] = "pil" ,A_ : bool = True ,**A_ : Optional[Any] ,) -> Union[Tuple, ImagePipelineOutput]: A = torch.randn( (batch_size, self.unet.config.in_channels, height, width) ,generator=A_ ,) A = decimal_to_bits(A_ ) * self.bit_scale A = latents.to(self.device ) self.scheduler.set_timesteps(A_ ) for t in self.progress_bar(self.scheduler.timesteps ): # predict the noise residual A = self.unet(A_ ,A_ ).sample # compute the previous noisy sample x_t -> x_t-1 A = self.scheduler.step(A_ ,A_ ,A_ ).prev_sample A = bits_to_decimal(A_ ) if output_type == "pil": A = self.numpy_to_pil(A_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=A_ )
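A round-trip sanity check for the bit codec at the top of this pipeline. The dump collapses both helpers to `_snake_case`, but the pipeline body calls them `decimal_to_bits` and `bits_to_decimal`, which is what this sketch assumes; the two are inverse up to 8-bit quantization of the input.

import torch

x = torch.rand(2, 3, 8, 8)   # pixel values in [0, 1]
bits = decimal_to_bits(x)    # shape (2, 3 * 8, 8, 8), entries in {-1.0, +1.0}
x_rec = bits_to_decimal(bits)
# Exact up to truncation to 8 bits, i.e. int(x * 255) / 255:
assert torch.allclose(x_rec, (x * 255).int().clamp(0, 255) / 255)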
709
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def _snake_case ( snake_case__ : int ): A = SwinvaConfig() A = swinva_name.split('_' ) A = name_split[1] if "to" in name_split[3]: A = int(name_split[3][-3:] ) else: A = int(name_split[3] ) if "to" in name_split[2]: A = int(name_split[2][-2:] ) else: A = int(name_split[2][6:] ) if model_size == "tiny": A = 96 A = (2, 2, 6, 2) A = (3, 6, 12, 24) elif model_size == "small": A = 96 A = (2, 2, 18, 2) A = (3, 6, 12, 24) elif model_size == "base": A = 128 A = (2, 2, 18, 2) A = (4, 8, 16, 32) else: A = 192 A = (2, 2, 18, 2) A = (6, 12, 24, 48) if "to" in swinva_name: A = (12, 12, 12, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): A = 2_1841 A = 'huggingface/label-files' A = 'imagenet-22k-id2label.json' A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) ) A = {int(snake_case__ ): v for k, v in idalabel.items()} A = idalabel A = {v: k for k, v in idalabel.items()} else: A = 1000 A = 'huggingface/label-files' A = 'imagenet-1k-id2label.json' A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) ) A = {int(snake_case__ ): v for k, v in idalabel.items()} A = idalabel A = {v: k for k, v in idalabel.items()} A = img_size A = num_classes A = embed_dim A = depths A = num_heads A = window_size return config def _snake_case ( snake_case__ : List[Any] ): if "patch_embed.proj" in name: A = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: A = name.replace('patch_embed.norm' , 'embeddings.norm' ) if "layers" in name: A = 'encoder.' + name if "attn.proj" in name: A = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: A = name.replace('attn' , 'attention.self' ) if "norm1" in name: A = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: A = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: A = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: A = name.replace('mlp.fc2' , 'output.dense' ) if "q_bias" in name: A = name.replace('q_bias' , 'query.bias' ) if "k_bias" in name: A = name.replace('k_bias' , 'key.bias' ) if "v_bias" in name: A = name.replace('v_bias' , 'value.bias' ) if "cpb_mlp" in name: A = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' ) if name == "norm.weight": A = 'layernorm.weight' if name == "norm.bias": A = 'layernorm.bias' if "head" in name: A = name.replace('head' , 'classifier' ) else: A = 'swinv2.' + name return name def _snake_case ( snake_case__ : List[Any] , snake_case__ : List[Any] ): for key in orig_state_dict.copy().keys(): A = orig_state_dict.pop(snake_case__ ) if "mask" in key: continue elif "qkv" in key: A = key.split('.' 
) A = int(key_split[1] ) A = int(key_split[3] ) A = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: A = val[:dim, :] A = val[dim : dim * 2, :] A = val[-dim:, :] else: A = val[:dim] A = val[ dim : dim * 2 ] A = val[-dim:] else: A = val return orig_state_dict def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Tuple ): A = timm.create_model(snake_case__ , pretrained=snake_case__ ) timm_model.eval() A = get_swinva_config(snake_case__ ) A = SwinvaForImageClassification(snake_case__ ) model.eval() A = convert_state_dict(timm_model.state_dict() , snake_case__ ) model.load_state_dict(snake_case__ ) A = 'http://images.cocodataset.org/val2017/000000039769.jpg' A = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swinva_name.replace('_' , '-' ) ) ) A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) A = image_processor(images=snake_case__ , return_tensors='pt' ) A = timm_model(inputs['pixel_values'] ) A = model(**snake_case__ ).logits assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) print(F'Saving model {swinva_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case__ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(snake_case__ ) model.push_to_hub( repo_path_or_name=Path(snake_case__ , snake_case__ ) , organization='nandwalritik' , commit_message='Add model' , ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swinv2_name''', default='''swinv2_tiny_patch4_window8_256''', type=str, help='''Name of the Swinv2 timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) _lowercase = parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
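A standalone illustration of the qkv split performed in `convert_state_dict` above: timm stores one fused (3 * dim, dim) projection per attention block, while the HF checkpoint stores separate query/key/value matrices. The dimension here is hypothetical.

import torch

dim = 96  # hypothetical all_head_size
fused_qkv_weight = torch.randn(3 * dim, dim)
q_w = fused_qkv_weight[:dim, :]
k_w = fused_qkv_weight[dim : dim * 2, :]
v_w = fused_qkv_weight[-dim:, :]
# The three slices partition the fused matrix exactly:
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), fused_qkv_weight)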
22
0
"""simple docstring""" import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _lowercase = 16 _lowercase = 32 def _snake_case ( snake_case__ : Accelerator , snake_case__ : DatasetDict , snake_case__ : List[int] , snake_case__ : List[int] , snake_case__ : int = 16 ): A = AutoTokenizer.from_pretrained('bert-base-cased' ) A = DatasetDict( { 'train': dataset['train'].select(snake_case__ ), 'validation': dataset['train'].select(snake_case__ ), 'test': dataset['validation'], } ) def tokenize_function(snake_case__ : Optional[Any] ): # max_length=None => use the model max length (it's actually the default) A = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case__ , max_length=snake_case__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): A = datasets.map( snake_case__ , batched=snake_case__ , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(snake_case__ : Optional[Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. A = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": A = 16 elif accelerator.mixed_precision != "no": A = 8 else: A = None return tokenizer.pad( snake_case__ , padding='longest' , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors='pt' , ) # Instantiate dataloaders. 
A = DataLoader( tokenized_datasets['train'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) A = DataLoader( tokenized_datasets['validation'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) A = DataLoader( tokenized_datasets['test'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) return train_dataloader, eval_dataloader, test_dataloader def _snake_case ( snake_case__ : str , snake_case__ : List[str] ): # New Code # A = [] # Download the dataset A = load_dataset('glue' , 'mrpc' ) # Create our splits A = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator A = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A = config['lr'] A = int(config['num_epochs'] ) A = int(config['seed'] ) A = int(config['batch_size'] ) A = evaluate.load('glue' , 'mrpc' ) # If the batch size is too big we use gradient accumulation A = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: A = batch_size // MAX_GPU_BATCH_SIZE A = MAX_GPU_BATCH_SIZE set_seed(snake_case__ ) # New Code # # Create our folds: A = kfold.split(np.zeros(datasets['train'].num_rows ) , datasets['train']['label'] ) A = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(snake_case__ ): A , A , A = get_fold_dataloaders( snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=snake_case__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). A = model.to(accelerator.device ) # Instantiate optimizer A = AdamW(params=model.parameters() , lr=snake_case__ ) # Instantiate scheduler A = get_linear_schedule_with_warmup( optimizer=snake_case__ , num_warmup_steps=100 , num_training_steps=(len(snake_case__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. A , A , A , A , A = accelerator.prepare( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # Now we train the model for epoch in range(snake_case__ ): model.train() for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) A = model(**snake_case__ ) A = outputs.loss A = loss / gradient_accumulation_steps accelerator.backward(snake_case__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): A = model(**snake_case__ ) A = outputs.logits.argmax(dim=-1 ) A , A = accelerator.gather_for_metrics((predictions, batch['labels']) ) metric.add_batch( predictions=snake_case__ , references=snake_case__ , ) A = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}:' , snake_case__ ) # New Code # # We also run predictions on the test set at the very end A = [] for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): A = model(**snake_case__ ) A = outputs.logits A , A = accelerator.gather_for_metrics((predictions, batch['labels']) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(snake_case__ , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: A = torch.cat(snake_case__ , dim=0 ) A = torch.stack(snake_case__ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) A = metric.compute(predictions=snake_case__ , references=snake_case__ ) accelerator.print('Average test metrics from all folds:' , snake_case__ ) def _snake_case ( ): A = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=snake_case__ , default=snake_case__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) # New Code # parser.add_argument('--num_folds' , type=snake_case__ , default=3 , help='The number of splits to perform across the dataset' ) A = parser.parse_args() A = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(snake_case__ , snake_case__ ) if __name__ == "__main__": main()
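A minimal sketch of the fold generation the training function relies on: StratifiedKFold only needs the label column to produce train/validation index arrays whose class balance matches the full dataset. The toy labels are hypothetical.

import numpy as np
from sklearn.model_selection import StratifiedKFold

labels = np.array([0, 1] * 10)  # 20 samples, balanced binary labels
kfold = StratifiedKFold(n_splits=4)
for fold, (train_idxs, valid_idxs) in enumerate(kfold.split(np.zeros(len(labels)), labels)):
    # each validation fold preserves the 50/50 class ratio
    print(fold, len(train_idxs), len(valid_idxs))  # 15 train / 5 valid per fold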
710
"""simple docstring""" from math import pi, sqrt def _snake_case ( snake_case__ : float ): if num <= 0: raise ValueError('math domain error' ) if num > 171.5: raise OverflowError('math range error' ) elif num - int(snake_case__ ) not in (0, 0.5): raise NotImplementedError('num must be an integer or a half-integer' ) elif num == 0.5: return sqrt(snake_case__ ) else: return 1.0 if num == 1 else (num - 1) * gamma(num - 1 ) def _snake_case ( ): assert gamma(0.5 ) == sqrt(snake_case__ ) assert gamma(1 ) == 1.0 assert gamma(2 ) == 1.0 if __name__ == "__main__": from doctest import testmod testmod() _lowercase = 1.0 while num: _lowercase = float(input('''Gamma of: ''')) print(F"""gamma({num}) = {gamma(num)}""") print('''\nEnter 0 to exit...''')
22
0
"""simple docstring""" class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] ,A_ : Dict ,A_ : Any ,A_ : Union[str, Any] ) -> List[Any]: A = name A = value A = weight def __repr__( self : Dict ) -> Union[str, Any]: return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})' def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: return self.value def _SCREAMING_SNAKE_CASE ( self : str ) -> str: return self.name def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: return self.weight def _SCREAMING_SNAKE_CASE ( self : str ) -> str: return self.value / self.weight def _snake_case ( snake_case__ : Any , snake_case__ : Any , snake_case__ : Dict ): A = [] for i in range(len(snake_case__ ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Any ): A = sorted(snake_case__ , key=snake_case__ , reverse=snake_case__ ) A = [] A , A = 0.0, 0.0 for i in range(len(snake_case__ ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def _snake_case ( ): pass if __name__ == "__main__": import doctest doctest.testmod()
711
"""simple docstring""" from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: torch.FloatTensor class lowerCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : List[str] ,A_ : Dict=3 ,A_ : int=3 ,A_ : str=("DownEncoderBlock2D",) ,A_ : Dict=(64,) ,A_ : str=2 ,A_ : Union[str, Any]=32 ,A_ : Optional[int]="silu" ,A_ : str=True ,) -> Union[str, Any]: super().__init__() A = layers_per_block A = torch.nn.Convad( A_ ,block_out_channels[0] ,kernel_size=3 ,stride=1 ,padding=1 ,) A = None A = nn.ModuleList([] ) # down A = block_out_channels[0] for i, down_block_type in enumerate(A_ ): A = output_channel A = block_out_channels[i] A = i == len(A_ ) - 1 A = get_down_block( A_ ,num_layers=self.layers_per_block ,in_channels=A_ ,out_channels=A_ ,add_downsample=not is_final_block ,resnet_eps=1e-6 ,downsample_padding=0 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,) self.down_blocks.append(A_ ) # mid A = UNetMidBlockaD( in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,) # out A = nn.GroupNorm(num_channels=block_out_channels[-1] ,num_groups=A_ ,eps=1e-6 ) A = nn.SiLU() A = 2 * out_channels if double_z else out_channels A = nn.Convad(block_out_channels[-1] ,A_ ,3 ,padding=1 ) A = False def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]: A = x A = self.conv_in(A_ ) if self.training and self.gradient_checkpointing: def create_custom_forward(A_ : Dict ): def custom_forward(*A_ : Tuple ): return module(*A_ ) return custom_forward # down if is_torch_version('>=' ,'1.11.0' ): for down_block in self.down_blocks: A = torch.utils.checkpoint.checkpoint( create_custom_forward(A_ ) ,A_ ,use_reentrant=A_ ) # middle A = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) ,A_ ,use_reentrant=A_ ) else: for down_block in self.down_blocks: A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ ) # middle A = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) ,A_ ) else: # down for down_block in self.down_blocks: A = down_block(A_ ) # middle A = self.mid_block(A_ ) # post-process A = self.conv_norm_out(A_ ) A = self.conv_act(A_ ) A = self.conv_out(A_ ) return sample class lowerCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : List[Any] ,A_ : Optional[Any]=3 ,A_ : Optional[int]=3 ,A_ : str=("UpDecoderBlock2D",) ,A_ : Any=(64,) ,A_ : Optional[int]=2 ,A_ : Optional[int]=32 ,A_ : Tuple="silu" ,A_ : Optional[int]="group" ,) -> Any: super().__init__() A = layers_per_block A = nn.Convad( A_ ,block_out_channels[-1] ,kernel_size=3 ,stride=1 ,padding=1 ,) A = None A = nn.ModuleList([] ) A = in_channels if norm_type == 'spatial' else None # mid A = UNetMidBlockaD( in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' if norm_type == 'group' else norm_type ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,) # up A = list(reversed(A_ ) ) A = reversed_block_out_channels[0] for i, up_block_type 
in enumerate(A_ ): A = output_channel A = reversed_block_out_channels[i] A = i == len(A_ ) - 1 A = get_up_block( A_ ,num_layers=self.layers_per_block + 1 ,in_channels=A_ ,out_channels=A_ ,prev_output_channel=A_ ,add_upsample=not is_final_block ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,resnet_time_scale_shift=A_ ,) self.up_blocks.append(A_ ) A = output_channel # out if norm_type == "spatial": A = SpatialNorm(block_out_channels[0] ,A_ ) else: A = nn.GroupNorm(num_channels=block_out_channels[0] ,num_groups=A_ ,eps=1e-6 ) A = nn.SiLU() A = nn.Convad(block_out_channels[0] ,A_ ,3 ,padding=1 ) A = False def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Union[str, Any]=None ) -> Any: A = z A = self.conv_in(A_ ) A = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(A_ : List[Any] ): def custom_forward(*A_ : Tuple ): return module(*A_ ) return custom_forward if is_torch_version('>=' ,'1.11.0' ): # middle A = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) ,A_ ,A_ ,use_reentrant=A_ ) A = sample.to(A_ ) # up for up_block in self.up_blocks: A = torch.utils.checkpoint.checkpoint( create_custom_forward(A_ ) ,A_ ,A_ ,use_reentrant=A_ ) else: # middle A = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) ,A_ ,A_ ) A = sample.to(A_ ) # up for up_block in self.up_blocks: A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ ,A_ ) else: # middle A = self.mid_block(A_ ,A_ ) A = sample.to(A_ ) # up for up_block in self.up_blocks: A = up_block(A_ ,A_ ) # post-process if latent_embeds is None: A = self.conv_norm_out(A_ ) else: A = self.conv_norm_out(A_ ,A_ ) A = self.conv_act(A_ ) A = self.conv_out(A_ ) return sample class lowerCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[int] ,A_ : Optional[int] ,A_ : Any ,A_ : str ,A_ : Dict=None ,A_ : List[Any]="random" ,A_ : Optional[int]=False ,A_ : str=True ) -> List[str]: super().__init__() A = n_e A = vq_embed_dim A = beta A = legacy A = nn.Embedding(self.n_e ,self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e ,1.0 / self.n_e ) A = remap if self.remap is not None: self.register_buffer('used' ,torch.tensor(np.load(self.remap ) ) ) A = self.used.shape[0] A = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": A = self.re_embed A = self.re_embed + 1 print( F'Remapping {self.n_e} indices to {self.re_embed} indices. ' F'Using {self.unknown_index} for unknown indices.' 
) else: A = n_e A = sane_index_shape def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ) -> Any: A = inds.shape assert len(A_ ) > 1 A = inds.reshape(ishape[0] ,-1 ) A = self.used.to(A_ ) A = (inds[:, :, None] == used[None, None, ...]).long() A = match.argmax(-1 ) A = match.sum(2 ) < 1 if self.unknown_index == "random": A = torch.randint(0 ,self.re_embed ,size=new[unknown].shape ).to(device=new.device ) else: A = self.unknown_index return new.reshape(A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ) -> List[Any]: A = inds.shape assert len(A_ ) > 1 A = inds.reshape(ishape[0] ,-1 ) A = self.used.to(A_ ) if self.re_embed > self.used.shape[0]: # extra token A = 0 # simply set to zero A = torch.gather(used[None, :][inds.shape[0] * [0], :] ,1 ,A_ ) return back.reshape(A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[Any] ) -> str: # reshape z -> (batch, height, width, channel) and flatten A = z.permute(0 ,2 ,3 ,1 ).contiguous() A = z.view(-1 ,self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z A = torch.argmin(torch.cdist(A_ ,self.embedding.weight ) ,dim=1 ) A = self.embedding(A_ ).view(z.shape ) A = None A = None # compute loss for embedding if not self.legacy: A = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: A = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients A = z + (z_q - z).detach() # reshape back to match original input shape A = z_q.permute(0 ,3 ,1 ,2 ).contiguous() if self.remap is not None: A = min_encoding_indices.reshape(z.shape[0] ,-1 ) # add batch axis A = self.remap_to_used(A_ ) A = min_encoding_indices.reshape(-1 ,1 ) # flatten if self.sane_index_shape: A = min_encoding_indices.reshape(z_q.shape[0] ,z_q.shape[2] ,z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : str ) -> Union[str, Any]: # shape specifying (batch, height, width, channel) if self.remap is not None: A = indices.reshape(shape[0] ,-1 ) # add batch axis A = self.unmap_to_all(A_ ) A = indices.reshape(-1 ) # flatten again # get quantized latent vectors A = self.embedding(A_ ) if shape is not None: A = z_q.view(A_ ) # reshape back to match original input shape A = z_q.permute(0 ,3 ,1 ,2 ).contiguous() return z_q class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : str ,A_ : Tuple ,A_ : Dict=False ) -> List[str]: A = parameters A , A = torch.chunk(A_ ,2 ,dim=1 ) A = torch.clamp(self.logvar ,-30.0 ,20.0 ) A = deterministic A = torch.exp(0.5 * self.logvar ) A = torch.exp(self.logvar ) if self.deterministic: A = A = torch.zeros_like( self.mean ,device=self.parameters.device ,dtype=self.parameters.dtype ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[torch.Generator] = None ) -> torch.FloatTensor: # make sure sample is on the same device as the parameters and has same dtype A = randn_tensor( self.mean.shape ,generator=A_ ,device=self.parameters.device ,dtype=self.parameters.dtype ) A = self.mean + self.std * sample return x def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple=None ) -> int: if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean ,2 ) + self.var - 1.0 - self.logvar ,dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean ,2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar ,dim=[1, 2, 3] 
,) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[str] ,A_ : Union[str, Any]=[1, 2, 3] ) -> List[str]: if self.deterministic: return torch.Tensor([0.0] ) A = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean ,2 ) / self.var ,dim=A_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: return self.mean
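A standalone sketch of the codebook lookup inside the vector quantizer above: each flattened latent vector is snapped to its nearest embedding row by L2 distance, and the straight-through estimator lets gradients bypass the non-differentiable argmin. Sizes are hypothetical.

import torch

n_e, d = 16, 4                  # 16 codes of dimension 4
codebook = torch.randn(n_e, d)
z_flat = torch.randn(10, d)     # 10 latent vectors
indices = torch.argmin(torch.cdist(z_flat, codebook), dim=1)
z_q = codebook[indices]         # quantized latents, shape (10, 4)
# straight-through: forward pass uses z_q, backward pass treats it as identity on z_flat
z_st = z_flat + (z_q - z_flat).detach()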
22
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _lowercase = { '''configuration_clip''': [ '''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPConfig''', '''CLIPOnnxConfig''', '''CLIPTextConfig''', '''CLIPVisionConfig''', ], '''processing_clip''': ['''CLIPProcessor'''], '''tokenization_clip''': ['''CLIPTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''CLIPTokenizerFast'''] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''CLIPFeatureExtractor'''] _lowercase = ['''CLIPImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPModel''', '''CLIPPreTrainedModel''', '''CLIPTextModel''', '''CLIPTextModelWithProjection''', '''CLIPVisionModel''', '''CLIPVisionModelWithProjection''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFCLIPModel''', '''TFCLIPPreTrainedModel''', '''TFCLIPTextModel''', '''TFCLIPVisionModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''FlaxCLIPModel''', '''FlaxCLIPPreTrainedModel''', '''FlaxCLIPTextModel''', '''FlaxCLIPTextPreTrainedModel''', '''FlaxCLIPVisionModel''', '''FlaxCLIPVisionPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
712
"""simple docstring""" def _snake_case ( snake_case__ : list , snake_case__ : list , snake_case__ : int ): A = len(snake_case__ ) A = [[0] * n for i in range(snake_case__ )] for i in range(snake_case__ ): A = y_points[i] for i in range(2 , snake_case__ ): for j in range(snake_case__ , snake_case__ ): A = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
22
0
from math import factorial

DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)


if __name__ == "__main__":
    print(f"{solution() = }")
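A quick check of the predicate and a one-line justification of the search bound used in `solution`:

# 145 is a known curious number: 1! + 4! + 5! = 1 + 24 + 120 = 145.
assert sum_of_digit_factorial(145) == 145
# The bound 7 * 9! + 1 suffices because an 8-digit number's digit factorials sum
# to at most 8 * 9! = 2,903,040, which has only 7 digits, so no 8-digit solutions exist.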
713
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] ,A_ : Optional[Any] ,A_ : Optional[int]=2 ,A_ : Any=True ,A_ : List[str]=False ,A_ : Tuple=10 ,A_ : List[Any]=3 ,A_ : Any=32 * 8 ,A_ : Dict=32 * 8 ,A_ : List[Any]=4 ,A_ : Tuple=64 ,) -> List[str]: A = parent A = batch_size A = is_training A = use_auxiliary_loss A = num_queries A = num_channels A = min_size A = max_size A = num_labels A = hidden_dim A = hidden_dim def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]: A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( A_ ) A = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=A_ ) A = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=A_ ) > 0.5 ).float() A = (torch.rand((self.batch_size, self.num_labels) ,device=A_ ) > 0.5).long() A = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]: A = MaskaFormerConfig( hidden_size=self.hidden_dim ,) A = self.num_queries A = self.num_labels A = [1, 1, 1, 1] A = self.num_channels A = 64 A = 128 A = self.hidden_dim A = self.hidden_dim A = self.hidden_dim return config def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A , A , A , A , A = self.prepare_config_and_inputs() A = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask} return config, inputs_dict def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]: A = output.encoder_hidden_states A = output.pixel_decoder_hidden_states A = output.transformer_decoder_hidden_states self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(A_ ) ,config.decoder_layers ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : List[str] ,A_ : Union[str, Any]=False ) -> str: with torch.no_grad(): A = MaskaFormerModel(config=A_ ) model.to(A_ ) model.eval() A = model(pixel_values=A_ ,pixel_mask=A_ ) A = model(A_ ,output_hidden_states=A_ ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : Any ,A_ : Dict ,A_ : Any ,A_ : Dict ) -> Optional[Any]: A = MaskaFormerForUniversalSegmentation(config=A_ ) model.to(A_ ) model.eval() def 
comm_check_on_output(A_ : str ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): A = model(pixel_values=A_ ,pixel_mask=A_ ) A = model(A_ ) comm_check_on_output(A_ ) A = model( pixel_values=A_ ,pixel_mask=A_ ,mask_labels=A_ ,class_labels=A_ ) comm_check_on_output(A_ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) ) @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Union[str, Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () _lowerCamelCase: Optional[Any] = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {} _lowerCamelCase: int = False _lowerCamelCase: Dict = False _lowerCamelCase: List[str] = False _lowerCamelCase: int = False def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict: A = MaskaFormerModelTester(self ) A = ConfigTester(self ,config_class=A_ ,has_text_modality=A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A , A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*A_ ) @unittest.skip(reason='Mask2Former does not use inputs_embeds' ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: pass @unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: pass @unittest.skip(reason='Mask2Former is not a generative model' ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: pass @unittest.skip(reason='Mask2Former does not use token embeddings' ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: pass @require_torch_multi_gpu @unittest.skip( reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: pass def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ) A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A = [*signature.parameters.keys()] A = ['pixel_values'] self.assertListEqual(arg_names[:1] ,A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: for model_name in ["facebook/mask2former-swin-small-coco-instance"]: A = MaskaFormerModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = (self.model_tester.min_size,) * 2 A = { 'pixel_values': torch.randn((2, 3, *size) ,device=A_ ), 'mask_labels': torch.randn((2, 10, *size) ,device=A_ ), 'class_labels': torch.zeros(2 ,10 ,device=A_ ).long(), } A = self.model_tester.get_config() A = MaskaFormerForUniversalSegmentation(A_ ).to(A_ ) A = model(**A_ ) self.assertTrue(outputs.loss is not None ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ).to(A_ ) A = model(**A_ ,output_attentions=A_ ) self.assertTrue(outputs.attentions is not None ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: if not self.model_tester.is_training: return A = self.all_model_classes[1] A , A , A , A , A = self.model_tester.prepare_config_and_inputs() A = model_class(A_ ) model.to(A_ ) model.train() A = model(A_ ,mask_labels=A_ ,class_labels=A_ ).loss loss.backward() def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = self.all_model_classes[1] A , A , A , A , A = self.model_tester.prepare_config_and_inputs() A = True A = True A = model_class(A_ ).to(A_ ) model.train() A = model(A_ ,mask_labels=A_ ,class_labels=A_ ) A = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() A = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() A = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() A = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=A_ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) _lowercase = 1e-4 def _snake_case ( ): A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_vision @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: return "facebook/mask2former-swin-small-coco-instance" @cached_property def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: A = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(A_ ) A = self.default_image_processor A = prepare_img() A = image_processor(A_ ,return_tensors='pt' ).to(A_ ) A = inputs['pixel_values'].shape # check size is divisible by 32 
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(A_ ,(1, 3, 384, 384) ) with torch.no_grad(): A = model(**A_ ) A = torch.tensor( [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(A_ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) ) A = torch.tensor( [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(A_ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) ) A = torch.tensor( [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(A_ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,A_ ,atol=A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval() A = self.default_image_processor A = prepare_img() A = image_processor(A_ ,return_tensors='pt' ).to(A_ ) A = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(A_ ,(1, 3, 384, 384) ) with torch.no_grad(): A = model(**A_ ) # masks_queries_logits A = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) A = [ [-8.78_39, -9.00_56, -8.81_21], [-7.41_04, -7.03_13, -6.54_01], [-6.61_05, -6.34_27, -6.46_75], ] A = torch.tensor(A_ ).to(A_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,A_ ,atol=A_ ) ) # class_queries_logits A = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) ) A = torch.tensor( [ [1.83_24, -8.08_35, -4.19_22], [0.84_50, -9.00_50, -3.60_53], [0.30_45, -7.72_93, -3.02_75], ] ).to(A_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,A_ ,atol=A_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval() A = self.default_image_processor A = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors='pt' ,) A = inputs['pixel_values'].to(A_ ) A = [el.to(A_ ) for el in inputs['mask_labels']] A = [el.to(A_ ) for el in inputs['class_labels']] with torch.no_grad(): A = model(**A_ ) self.assertTrue(outputs.loss is not None )
22
0
import logging import random import ray from transformers import RagConfig, RagRetriever, RagTokenizer from transformers.models.rag.retrieval_rag import CustomHFIndex _lowercase = logging.getLogger(__name__) class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Dict ) -> Tuple: A = False def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : int ,A_ : Any ,A_ : Dict ,A_ : Union[str, Any] ) -> Tuple: if not self.initialized: A = RagRetriever( A_ ,question_encoder_tokenizer=A_ ,generator_tokenizer=A_ ,index=A_ ,init_retrieval=A_ ,) A = True def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: self.retriever.index.init_index() def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : int ) -> List[Any]: A , A = self.retriever._main_retrieve(A_ ,A_ ) return doc_ids, retrieved_doc_embeds class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Union[str, Any] ,A_ : int ,A_ : Any ,A_ : Optional[Any] ,A_ : Dict ,A_ : str=None ) -> List[str]: if index is not None and index.is_initialized() and len(A_ ) > 0: raise ValueError( 'When using Ray for distributed fine-tuning, ' 'you\'ll need to provide the paths instead, ' 'as the dataset and the index are loaded ' 'separately. More info in examples/rag/use_own_knowledge_dataset.py ' ) super().__init__( A_ ,question_encoder_tokenizer=A_ ,generator_tokenizer=A_ ,index=A_ ,init_retrieval=A_ ,) A = retrieval_workers if len(self.retrieval_workers ) > 0: ray.get( [ worker.create_rag_retriever.remote(A_ ,A_ ,A_ ,A_ ) for worker in self.retrieval_workers ] ) def _SCREAMING_SNAKE_CASE ( self : str ) -> int: logger.info('initializing retrieval' ) if len(self.retrieval_workers ) > 0: ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] ) else: # Non-distributed training. Load index into this same process. self.index.init_index() def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : int ) -> Optional[Any]: if len(self.retrieval_workers ) > 0: # Select a random retrieval actor. A = self.retrieval_workers[random.randint(0 ,len(self.retrieval_workers ) - 1 )] A , A = ray.get(random_worker.retrieve.remote(A_ ,A_ ) ) else: A , A = self._main_retrieve(A_ ,A_ ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(A_ ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Any ,A_ : List[str] ,A_ : Optional[int]=None ,**A_ : Any ) -> str: return super(A_ ,cls ).get_tokenizers(A_ ,A_ ,**A_ ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Any ,A_ : int ,A_ : Union[str, Any] ,A_ : int=None ,**A_ : List[str] ) -> Union[str, Any]: A = kwargs.pop('config' ,A_ ) or RagConfig.from_pretrained(A_ ,**A_ ) A = RagTokenizer.from_pretrained(A_ ,config=A_ ) A = rag_tokenizer.question_encoder A = rag_tokenizer.generator if indexed_dataset is not None: A = 'custom' A = CustomHFIndex(config.retrieval_vector_size ,A_ ) else: A = cls._build_index(A_ ) return cls( A_ ,question_encoder_tokenizer=A_ ,generator_tokenizer=A_ ,retrieval_workers=A_ ,index=A_ ,)
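A minimal sketch of the Ray actor pattern this retriever is built on: work is farmed out to remote workers and results are collected with ray.get(). The worker class and its payload are toy stand-ins, not the real retrieval logic.

import random

import ray


@ray.remote
class Worker:
    def retrieve(self, query):
        return f"docs for {query}"


ray.init(ignore_reinit_error=True)
workers = [Worker.remote() for _ in range(3)]
# pick a random worker, mirroring the retrieve() dispatch in the class above
worker = workers[random.randint(0, len(workers) - 1)]
print(ray.get(worker.retrieve.remote("what is buoyancy?")))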
714
"""simple docstring""" from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] ,A_ : Optional[Any] ,) -> Optional[int]: A = parent A = 13 A = 7 A = True A = True A = True A = 99 A = 32 A = 2 A = 4 A = 37 A = 'gelu' A = 0.1 A = 0.1 A = 512 A = 16 A = 2 A = 0.02 A = 3 A = 4 A = None def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = None if self.use_input_mask: A = random_attention_mask([self.batch_size, self.seq_length] ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A = ids_tensor([self.batch_size] ,self.num_choices ) A = EsmConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,pad_token_id=1 ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = self.prepare_config_and_inputs() A = True A = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : List[Any] ,A_ : Any ,A_ : Any ) -> Dict: A = TFEsmModel(config=A_ ) A = {'input_ids': input_ids, 'attention_mask': input_mask} A = model(A_ ) A = [input_ids, input_mask] A = model(A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : List[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,) -> Optional[int]: A = True A = TFEsmModel(config=A_ ) A = { 'input_ids': input_ids, 'attention_mask': input_mask, 'encoder_hidden_states': encoder_hidden_states, 'encoder_attention_mask': encoder_attention_mask, } A = model(A_ ) A = [input_ids, input_mask] A = model(A_ ,encoder_hidden_states=A_ ) # Also check the case where encoder outputs are not passed A = model(A_ ,attention_mask=A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[Any] 
,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[Any] ) -> Dict: A = TFEsmForMaskedLM(config=A_ ) A = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[Any] ,A_ : int ,A_ : Tuple ,A_ : Optional[int] ) -> Union[str, Any]: A = self.num_labels A = TFEsmForTokenClassification(config=A_ ) A = {'input_ids': input_ids, 'attention_mask': input_mask} A = model(A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) _lowerCamelCase: List[str] = ( { '''feature-extraction''': TFEsmModel, '''fill-mask''': TFEsmForMaskedLM, '''text-classification''': TFEsmForSequenceClassification, '''token-classification''': TFEsmForTokenClassification, '''zero-shot''': TFEsmForSequenceClassification, } if is_tf_available() else {} ) _lowerCamelCase: Union[str, Any] = False _lowerCamelCase: List[Any] = False def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: A = TFEsmModelTester(self ) A = ConfigTester(self ,config_class=A_ ,hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = TFEsmModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) @unittest.skip('Protein models do not support embedding resizing.' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: pass @unittest.skip('Protein models do not support embedding resizing.' 
) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any: pass def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ) assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer A = model.get_bias() assert isinstance(A_ ,A_ ) for k, v in name.items(): assert isinstance(A_ ,tf.Variable ) else: A = model.get_output_embeddings() assert x is None A = model.get_bias() assert name is None @require_tf class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) A = tf.constant([[0, 1, 2, 3, 4, 5]] ) A = model(A_ )[0] A = [1, 6, 33] self.assertEqual(list(output.numpy().shape ) ,A_ ) # compare the actual values for a slice. A = tf.constant( [ [ [8.92_15_18, -10.58_98_14, -6.4_67_13_07], [-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15], [-7.78_12_47, -13.95_15_57, -3.74_05_92], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-2 ) ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: A = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) A = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) A = model(A_ )[0] # compare the actual values for a slice. A = tf.constant( [ [ [0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39], [0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22], [0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
22
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''', '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = '''mobilenet_v1''' def __init__( self : str ,A_ : Optional[Any]=3 ,A_ : int=224 ,A_ : int=1.0 ,A_ : Optional[Any]=8 ,A_ : int="relu6" ,A_ : Any=True ,A_ : str=0.9_99 ,A_ : Union[str, Any]=0.02 ,A_ : List[Any]=0.0_01 ,**A_ : Tuple ,) -> int: super().__init__(**A_ ) if depth_multiplier <= 0: raise ValueError('depth_multiplier must be greater than zero.' ) A = num_channels A = image_size A = depth_multiplier A = min_depth A = hidden_act A = tf_padding A = classifier_dropout_prob A = initializer_range A = layer_norm_eps class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Any = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('pixel_values', {0: 'batch'})] ) @property def _SCREAMING_SNAKE_CASE ( self : int ) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([('logits', {0: 'batch'})] ) else: return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> float: return 1e-4
715
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys _lowercase = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''') _lowercase = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('''utf-8''').split() _lowercase = '''|'''.join(sys.argv[1:]) _lowercase = re.compile(rF"""^({joined_dirs}).*?\.py$""") _lowercase = [x for x in modified_files if regex.match(x)] print(''' '''.join(relevant_modified_files), end='''''')
22
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _lowercase = { '''configuration_squeezebert''': [ '''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SqueezeBertConfig''', '''SqueezeBertOnnxConfig''', ], '''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''SqueezeBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''SqueezeBertForMaskedLM''', '''SqueezeBertForMultipleChoice''', '''SqueezeBertForQuestionAnswering''', '''SqueezeBertForSequenceClassification''', '''SqueezeBertForTokenClassification''', '''SqueezeBertModel''', '''SqueezeBertModule''', '''SqueezeBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_squeezebert import ( SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) from .tokenization_squeezebert import SqueezeBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
716
"""simple docstring""" import sys from collections import defaultdict class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] ) -> int: A = [] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : int ) -> Optional[int]: return self.node_position[vertex] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[Any] ,A_ : Any ) -> List[Any]: A = pos def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : str ,A_ : Dict ,A_ : List[str] ) -> str: if start > size // 2 - 1: return else: if 2 * start + 2 >= size: A = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: A = 2 * start + 1 else: A = 2 * start + 2 if heap[smallest_child] < heap[start]: A , A = heap[smallest_child], positions[smallest_child] A , A = ( heap[start], positions[start], ) A , A = temp, tempa A = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] ,self.get_position(positions[start] ) ) self.set_position(positions[start] ,A_ ) self.top_to_bottom(A_ ,A_ ,A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Optional[int] ,A_ : Dict ,A_ : str ,A_ : Union[str, Any] ) -> Dict: A = position[index] while index != 0: A = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: A = heap[parent] A = position[parent] self.set_position(position[parent] ,A_ ) else: A = val A = temp self.set_position(A_ ,A_ ) break A = parent else: A = val A = temp self.set_position(A_ ,0 ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple ,A_ : Dict ) -> Union[str, Any]: A = len(A_ ) // 2 - 1 for i in range(A_ ,-1 ,-1 ): self.top_to_bottom(A_ ,A_ ,len(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ,A_ : Dict ) -> Union[str, Any]: A = positions[0] A = sys.maxsize self.top_to_bottom(A_ ,0 ,len(A_ ) ,A_ ) return temp def _snake_case ( snake_case__ : Dict ): A = Heap() A = [0] * len(snake_case__ ) A = [-1] * len(snake_case__ ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph A = [] # Heap of Distance of vertices from their neighboring vertex A = [] for vertex in range(len(snake_case__ ) ): distance_tv.append(sys.maxsize ) positions.append(snake_case__ ) heap.node_position.append(snake_case__ ) A = [] A = 1 A = sys.maxsize for neighbor, distance in adjacency_list[0]: A = 0 A = distance heap.heapify(snake_case__ , snake_case__ ) for _ in range(1 , len(snake_case__ ) ): A = heap.delete_minimum(snake_case__ , snake_case__ ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) A = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(snake_case__ )] ): A = distance heap.bottom_to_top( snake_case__ , heap.get_position(snake_case__ ) , snake_case__ , snake_case__ ) A = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > _lowercase = int(input('''Enter number of edges: ''').strip()) _lowercase = defaultdict(list) for _ in range(edges_number): _lowercase = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
22
0
"""simple docstring""" from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def _snake_case ( snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int=1e-12 ): A = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(snake_case__ , axis=1 ) , a_min=snake_case__ ) ).T A = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(snake_case__ , axis=1 ) , a_min=snake_case__ ) ).T return jnp.matmul(snake_case__ , norm_emb_a.T ) class lowerCAmelCase_ ( nn.Module ): '''simple docstring''' _lowerCamelCase: CLIPConfig _lowerCamelCase: jnp.dtype = jnp.floataa def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: A = FlaxCLIPVisionModule(self.config.vision_config ) A = nn.Dense(self.config.projection_dim ,use_bias=A_ ,dtype=self.dtype ) A = self.param('concept_embeds' ,jax.nn.initializers.ones ,(17, self.config.projection_dim) ) A = self.param( 'special_care_embeds' ,jax.nn.initializers.ones ,(3, self.config.projection_dim) ) A = self.param('concept_embeds_weights' ,jax.nn.initializers.ones ,(17,) ) A = self.param('special_care_embeds_weights' ,jax.nn.initializers.ones ,(3,) ) def __call__( self : Optional[int] ,A_ : str ) -> Optional[Any]: A = self.vision_model(A_ )[1] A = self.visual_projection(A_ ) A = jax_cosine_distance(A_ ,self.special_care_embeds ) A = jax_cosine_distance(A_ ,self.concept_embeds ) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs A = 0.0 A = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment A = jnp.round(A_ ,3 ) A = jnp.any(special_scores > 0 ,axis=1 ,keepdims=A_ ) # Use a lower threshold if an image has any special care concept A = is_special_care * 0.01 A = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment A = jnp.round(A_ ,3 ) A = jnp.any(concept_scores > 0 ,axis=1 ) return has_nsfw_concepts class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = CLIPConfig _lowerCamelCase: Dict = '''clip_input''' _lowerCamelCase: str = FlaxStableDiffusionSafetyCheckerModule def __init__( self : Union[str, Any] ,A_ : CLIPConfig ,A_ : Optional[Tuple] = None ,A_ : int = 0 ,A_ : jnp.dtype = jnp.floataa ,A_ : bool = True ,**A_ : str ,) -> int: if input_shape is None: A = (1, 224, 224, 3) A = self.module_class(config=A_ ,dtype=A_ ,**A_ ) super().__init__(A_ ,A_ ,input_shape=A_ ,seed=A_ ,dtype=A_ ,_do_init=_do_init ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : jax.random.KeyArray ,A_ : Tuple ,A_ : FrozenDict = None ) -> FrozenDict: # init input tensor A = jax.random.normal(A_ ,A_ ) A , A = jax.random.split(A_ ) A = {'params': params_rng, 'dropout': dropout_rng} A = self.module.init(A_ ,A_ )['params'] return random_params def __call__( self : str ,A_ : Optional[Any] ,A_ : dict = None ,) -> List[str]: A = jnp.transpose(A_ ,(0, 2, 3, 1) ) return self.module.apply( {'params': params or self.params} ,jnp.array(A_ ,dtype=jnp.floataa ) ,rngs={} ,)
717
"""simple docstring""" import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter _lowercase = True except ImportError: _lowercase = False _lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name def _snake_case ( snake_case__ : Namespace ): return AddNewModelCommand(args.testing , args.testing_file , path=args.path ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any: A = parser.add_parser('add-new-model' ) add_new_model_parser.add_argument('--testing' ,action='store_true' ,help='If in testing mode.' ) add_new_model_parser.add_argument('--testing_file' ,type=A_ ,help='Configuration file on which to run.' ) add_new_model_parser.add_argument( '--path' ,type=A_ ,help='Path to cookiecutter. Should only be used for testing purposes.' ) add_new_model_parser.set_defaults(func=A_ ) def __init__( self : Tuple ,A_ : bool ,A_ : str ,A_ : Tuple=None ,*A_ : List[str] ) -> Union[str, Any]: A = testing A = testing_file A = path def _SCREAMING_SNAKE_CASE ( self : int ) -> int: warnings.warn( 'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. ' 'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality ' 'checks, you should use `transformers-cli add-new-model-like` instead.' ) if not _has_cookiecutter: raise ImportError( 'Model creation dependencies are required to use the `add_new_model` command. Install them by running ' 'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' ) # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory A = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]] if len(A_ ) > 0: raise ValueError( 'Several directories starting with `cookiecutter-template-` in current working directory. ' 'Please clean your directory by removing all folders starting with `cookiecutter-template-` or ' 'change your working directory.' 
) A = ( Path(A_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent ) A = path_to_transformer_root / 'templates' / 'adding_a_new_model' # Execute cookiecutter if not self._testing: cookiecutter(str(A_ ) ) else: with open(self._testing_file ,'r' ) as configuration_file: A = json.load(A_ ) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=A_ ,extra_context=A_ ,) A = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0] # Retrieve configuration with open(directory + '/configuration.json' ,'r' ) as configuration_file: A = json.load(A_ ) A = configuration['lowercase_modelname'] A = configuration['generate_tensorflow_pytorch_and_flax'] os.remove(F'{directory}/configuration.json' ) A = 'PyTorch' in generate_tensorflow_pytorch_and_flax A = 'TensorFlow' in generate_tensorflow_pytorch_and_flax A = 'Flax' in generate_tensorflow_pytorch_and_flax A = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}' os.makedirs(A_ ,exist_ok=A_ ) os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=A_ ) # Tests require submodules as they have parent imports with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,'w' ): pass shutil.move( F'{directory}/__init__.py' ,F'{model_dir}/__init__.py' ,) shutil.move( F'{directory}/configuration_{lowercase_model_name}.py' ,F'{model_dir}/configuration_{lowercase_model_name}.py' ,) def remove_copy_lines(A_ : int ): with open(A_ ,'r' ) as f: A = f.readlines() with open(A_ ,'w' ) as f: for line in lines: if "# Copied from transformers." not in line: f.write(A_ ) if output_pytorch: if not self._testing: remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' ) shutil.move( F'{directory}/modeling_{lowercase_model_name}.py' ,F'{model_dir}/modeling_{lowercase_model_name}.py' ,) shutil.move( F'{directory}/test_modeling_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,) else: os.remove(F'{directory}/modeling_{lowercase_model_name}.py' ) os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' ) if output_tensorflow: if not self._testing: remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' ) shutil.move( F'{directory}/modeling_tf_{lowercase_model_name}.py' ,F'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,) shutil.move( F'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,) else: os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' ) os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' ) if output_flax: if not self._testing: remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' ) shutil.move( F'{directory}/modeling_flax_{lowercase_model_name}.py' ,F'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,) shutil.move( F'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,) else: os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' ) os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' ) shutil.move( F'{directory}/{lowercase_model_name}.md' ,F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,) shutil.move( 
F'{directory}/tokenization_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}.py' ,) shutil.move( F'{directory}/tokenization_fast_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(A_ : str ,A_ : str ,A_ : List[str] ): # Create temp file A , A = mkstemp() A = False with fdopen(A_ ,'w' ) as new_file: with open(A_ ) as old_file: for line in old_file: new_file.write(A_ ) if line_to_copy_below in line: A = True for line_to_copy in lines_to_copy: new_file.write(A_ ) if not line_found: raise ValueError(F'Line {line_to_copy_below} was not found in file.' ) # Copy the file permissions from the old file to the new file copymode(A_ ,A_ ) # Remove original file remove(A_ ) # Move new file move(A_ ,A_ ) def skip_units(A_ : Dict ): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(A_ : Tuple ): with open(A_ ) as datafile: A = [] A = False A = False for line in datafile: if "# To replace in: " in line and "##" not in line: A = line.split('"' )[1] A = skip_units(A_ ) elif "# Below: " in line and "##" not in line: A = line.split('"' )[1] A = skip_units(A_ ) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(A_ ,A_ ,A_ ) A = [] elif "# Replace with" in line and "##" not in line: A = [] elif "##" not in line: lines_to_copy.append(A_ ) remove(A_ ) replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' ) os.rmdir(A_ )
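The `replace` helper above uses the classic mkstemp-then-move idiom so the target file is never left half-written; below is a minimal standalone sketch of that idiom (the function name and arguments here are illustrative, not part of the original).

from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp

def insert_below(path, marker, new_lines):
    fd, tmp_path = mkstemp()
    with fdopen(fd, "w") as out, open(path) as src:
        for line in src:
            out.write(line)
            if marker in line:  # copy the requested lines in right below the marker
                out.writelines(new_lines)
    copymode(path, tmp_path)  # preserve the original file's permissions
    remove(path)
    move(tmp_path, path)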
22
0
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( BertTokenizer, ViltConfig, ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, ViltForQuestionAnswering, ViltImageProcessor, ViltProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) def _snake_case ( snake_case__ : List[str] , snake_case__ : int=False , snake_case__ : Dict=False , snake_case__ : Union[str, Any]=False ): A = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'transformer.blocks.{i}.norm1.weight', F'vilt.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((F'transformer.blocks.{i}.norm1.bias', F'vilt.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append( (F'transformer.blocks.{i}.attn.proj.weight', F'vilt.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append( (F'transformer.blocks.{i}.attn.proj.bias', F'vilt.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((F'transformer.blocks.{i}.norm2.weight', F'vilt.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((F'transformer.blocks.{i}.norm2.bias', F'vilt.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append( (F'transformer.blocks.{i}.mlp.fc1.weight', F'vilt.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((F'transformer.blocks.{i}.mlp.fc1.bias', F'vilt.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((F'transformer.blocks.{i}.mlp.fc2.weight', F'vilt.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((F'transformer.blocks.{i}.mlp.fc2.bias', F'vilt.encoder.layer.{i}.output.dense.bias') ) # embeddings rename_keys.extend( [ # text embeddings ('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'), ( 'text_embeddings.position_embeddings.weight', 'vilt.embeddings.text_embeddings.position_embeddings.weight', ), ('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'), ( 'text_embeddings.token_type_embeddings.weight', 'vilt.embeddings.text_embeddings.token_type_embeddings.weight', ), ('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'), ('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'), # patch embeddings ('transformer.cls_token', 'vilt.embeddings.cls_token'), ('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'), ('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'), ('transformer.pos_embed', 'vilt.embeddings.position_embeddings'), # token type embeddings ('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'), ] ) # final layernorm + pooler rename_keys.extend( [ ('transformer.norm.weight', 'vilt.layernorm.weight'), ('transformer.norm.bias', 'vilt.layernorm.bias'), ('pooler.dense.weight', 'vilt.pooler.dense.weight'), ('pooler.dense.bias', 'vilt.pooler.dense.bias'), ] ) # classifier head(s) if vqa_model: # classification head rename_keys.extend( [ ('vqa_classifier.0.weight', 'classifier.0.weight'), ('vqa_classifier.0.bias', 'classifier.0.bias'), ('vqa_classifier.1.weight', 'classifier.1.weight'), ('vqa_classifier.1.bias', 'classifier.1.bias'), ('vqa_classifier.3.weight', 
'classifier.3.weight'), ('vqa_classifier.3.bias', 'classifier.3.bias'), ] ) elif nlvr_model: # classification head rename_keys.extend( [ ('nlvr2_classifier.0.weight', 'classifier.0.weight'), ('nlvr2_classifier.0.bias', 'classifier.0.bias'), ('nlvr2_classifier.1.weight', 'classifier.1.weight'), ('nlvr2_classifier.1.bias', 'classifier.1.bias'), ('nlvr2_classifier.3.weight', 'classifier.3.weight'), ('nlvr2_classifier.3.bias', 'classifier.3.bias'), ] ) else: pass return rename_keys def _snake_case ( snake_case__ : Dict , snake_case__ : Any ): for i in range(config.num_hidden_layers ): A = 'vilt.' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A = state_dict.pop(F'transformer.blocks.{i}.attn.qkv.weight' ) A = state_dict.pop(F'transformer.blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict A = in_proj_weight[ : config.hidden_size, : ] A = in_proj_bias[: config.hidden_size] A = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A = in_proj_weight[ -config.hidden_size :, : ] A = in_proj_bias[-config.hidden_size :] def _snake_case ( snake_case__ : Optional[Any] ): A = ['head.weight', 'head.bias'] for k in ignore_keys: state_dict.pop(snake_case__ , snake_case__ ) def _snake_case ( snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : str ): A = dct.pop(snake_case__ ) A = val @torch.no_grad() def _snake_case ( snake_case__ : str , snake_case__ : List[Any] ): A = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=snake_case__ ) A = False A = False A = False A = False if "vqa" in checkpoint_url: A = True A = 3129 A = 'huggingface/label-files' A = 'vqa2-id2label.json' A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) ) A = {int(snake_case__ ): v for k, v in idalabel.items()} A = idalabel A = {v: k for k, v in idalabel.items()} A = ViltForQuestionAnswering(snake_case__ ) elif "nlvr" in checkpoint_url: A = True A = 2 A = {0: 'False', 1: 'True'} A = {v: k for k, v in config.idalabel.items()} A = 3 A = ViltForImagesAndTextClassification(snake_case__ ) elif "irtr" in checkpoint_url: A = True A = ViltForImageAndTextRetrieval(snake_case__ ) elif "mlm_itm" in checkpoint_url: A = True A = ViltForMaskedLM(snake_case__ ) else: raise ValueError('Unknown model type' ) # load state_dict of original model, remove and rename some keys A = torch.hub.load_state_dict_from_url(snake_case__ , map_location='cpu' )['state_dict'] A = create_rename_keys(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) for src, dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) read_in_q_k_v(snake_case__ , snake_case__ ) if mlm_model or irtr_model: A = ['itm_score.fc.weight', 'itm_score.fc.bias'] for k in ignore_keys: state_dict.pop(snake_case__ , snake_case__ ) # load state dict into HuggingFace model model.eval() if mlm_model: A , A = model.load_state_dict(snake_case__ , strict=snake_case__ ) assert missing_keys == ["mlm_score.decoder.bias"] else: model.load_state_dict(snake_case__ ) # Define processor A = ViltImageProcessor(size=384 ) A = BertTokenizer.from_pretrained('bert-base-uncased' ) A = ViltProcessor(snake_case__ , snake_case__ ) # Forward pass on example inputs (image + text) if nlvr_model: A = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=snake_case__ ).raw ) A = 
Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=snake_case__ ).raw ) A = ( 'The left image contains twice the number of dogs as the right image, and at least two dogs in total are' ' standing.' ) A = processor(snake_case__ , snake_case__ , return_tensors='pt' ) A = processor(snake_case__ , snake_case__ , return_tensors='pt' ) A = model( input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , ) else: A = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=snake_case__ ).raw ) if mlm_model: A = 'a bunch of [MASK] laying on a [MASK].' else: A = 'How many cats are there?' A = processor(snake_case__ , snake_case__ , return_tensors='pt' ) A = model(**snake_case__ ) # Verify outputs if mlm_model: A = torch.Size([1, 11, 3_0522] ) A = torch.tensor([-12.5061, -12.5123, -12.5174] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , snake_case__ , atol=1e-4 ) # verify masked token prediction equals "cats" A = outputs.logits[0, 4, :].argmax(-1 ).item() assert tokenizer.decode([predicted_id] ) == "cats" elif vqa_model: A = torch.Size([1, 3129] ) A = torch.tensor([-15.9495, -18.1472, -10.3041] ) assert torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 ) assert outputs.logits.shape == expected_shape # verify vqa prediction equals "2" A = outputs.logits.argmax(-1 ).item() assert model.config.idalabel[predicted_idx] == "2" elif nlvr_model: A = torch.Size([1, 2] ) A = torch.tensor([-2.8721, 2.1291] ) assert torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 ) assert outputs.logits.shape == expected_shape Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) print(F'Saving model and processor to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case__ ) processor.save_pretrained(snake_case__ ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) _lowercase = parser.parse_args() convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
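One step of the conversion above worth isolating: the fused-projection split reads timm's single (3 * hidden, hidden) attention matrix and slices it row-wise into separate query/key/value weights, exactly as the slicing expressions earlier in this file do. A NumPy sketch with a toy hidden size:

import numpy as np

hidden_size = 4
in_proj_weight = np.arange(3 * hidden_size * hidden_size, dtype=np.float32).reshape(3 * hidden_size, hidden_size)

query = in_proj_weight[:hidden_size, :]                  # first third
key = in_proj_weight[hidden_size : hidden_size * 2, :]   # middle third
value = in_proj_weight[-hidden_size:, :]                 # last third
assert query.shape == key.shape == value.shape == (hidden_size, hidden_size)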
718
"""simple docstring""" import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : int ,A_ : Tuple ,A_ : str=7 ,A_ : Tuple=3 ,A_ : List[Any]=18 ,A_ : List[str]=30 ,A_ : Optional[Any]=400 ,A_ : Any=True ,A_ : Optional[Any]=None ,A_ : List[str]=True ,) -> str: A = size if size is not None else {'height': 18, 'width': 18} A = parent A = batch_size A = num_channels A = image_size A = min_resolution A = max_resolution A = do_resize A = size A = do_normalize def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04], [-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[Any] = ImageGPTImageProcessor if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = ImageGPTImageProcessingTester(self ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ ,'clusters' ) ) self.assertTrue(hasattr(A_ ,'do_resize' ) ) self.assertTrue(hasattr(A_ ,'size' ) ) self.assertTrue(hasattr(A_ ,'do_normalize' ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} ) A = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ) self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: A = self.image_processing_class(**self.image_processor_dict ) A = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(A_ ,obj[key] ) ) else: self.assertEqual(obj[key] ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: A = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A = os.path.join(A_ ,'image_processor.json' ) image_processor_first.to_json_file(A_ ) A = self.image_processing_class.from_json_file(A_ ).to_dict() A = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: A = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(A_ ) A = 
self.image_processing_class.from_pretrained(A_ ).to_dict() A = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] ,A_ ) @unittest.skip('ImageGPT requires clusters at initialization' ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: pass def _snake_case ( ): A = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' ) A = Image.open(dataset[4]['file'] ) A = Image.open(dataset[5]['file'] ) A = [imagea, imagea] return images @require_vision @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : str ) -> int: A = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' ) A = prepare_images() # test non-batched A = image_processing(images[0] ,return_tensors='pt' ) self.assertIsInstance(encoding.input_ids ,torch.LongTensor ) self.assertEqual(encoding.input_ids.shape ,(1, 1024) ) A = [306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist() ,A_ ) # test batched A = image_processing(A_ ,return_tensors='pt' ) self.assertIsInstance(encoding.input_ids ,torch.LongTensor ) self.assertEqual(encoding.input_ids.shape ,(2, 1024) ) A = [303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() ,A_ )
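What the clusters exercised by these tests are for, hedged as a NumPy sketch: ImageGPT color-quantizes each normalized pixel to the id of its nearest cluster centroid, which is why the processor emits `input_ids` rather than `pixel_values`.

import numpy as np

clusters = np.array([[0.88, 0.66, 0.38], [-0.60, -0.02, 0.54]])  # toy centroids, like the tester's
pixels = np.array([[0.90, 0.60, 0.40], [-0.50, 0.00, 0.50]])     # two normalized RGB pixels
ids = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1).argmin(-1)
print(ids)  # [0 1]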
22
0
"""simple docstring""" import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: A = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small' ) A = AutoTokenizer.from_pretrained('google/mt5-small' ) A = tokenizer('Hello there' ,return_tensors='np' ).input_ids A = tokenizer('Hi I am' ,return_tensors='np' ).input_ids A = shift_tokens_right(A_ ,model.config.pad_token_id ,model.config.decoder_start_token_id ) A = model(A_ ,decoder_input_ids=A_ ).logits A = optax.softmax_cross_entropy(A_ ,onehot(A_ ,logits.shape[-1] ) ).mean() A = -(labels.shape[-1] * loss.item()) A = -84.91_27 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
719
"""simple docstring""" from argparse import ArgumentParser from . import BaseTransformersCLICommand def _snake_case ( snake_case__ : Optional[int] ): return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any: A = parser.add_parser('download' ) download_parser.add_argument( '--cache-dir' ,type=A_ ,default=A_ ,help='Path to location to store the models' ) download_parser.add_argument( '--force' ,action='store_true' ,help='Force the model to be download even if already in cache-dir' ) download_parser.add_argument( '--trust-remote-code' ,action='store_true' ,help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' ,) download_parser.add_argument('model' ,type=A_ ,help='Name of the model to download' ) download_parser.set_defaults(func=A_ ) def __init__( self : Dict ,A_ : str ,A_ : str ,A_ : bool ,A_ : bool ) -> Union[str, Any]: A = model A = cache A = force A = trust_remote_code def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: from ..models.auto import AutoModel, AutoTokenizer AutoModel.from_pretrained( self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code ) AutoTokenizer.from_pretrained( self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
22
0
"""simple docstring""" _lowercase = [0, 2, 4, 6, 8] _lowercase = [1, 3, 5, 7, 9] def _snake_case ( snake_case__ : int , snake_case__ : int , snake_case__ : list[int] , snake_case__ : int ): if remaining_length == 0: if digits[0] == 0 or digits[-1] == 0: return 0 for i in range(length // 2 - 1 , -1 , -1 ): remainder += digits[i] + digits[length - i - 1] if remainder % 2 == 0: return 0 remainder //= 10 return 1 if remaining_length == 1: if remainder % 2 == 0: return 0 A = 0 for digit in range(10 ): A = digit result += reversible_numbers( 0 , (remainder + 2 * digit) // 10 , snake_case__ , snake_case__ ) return result A = 0 for digita in range(10 ): A = digita if (remainder + digita) % 2 == 0: A = ODD_DIGITS else: A = EVEN_DIGITS for digita in other_parity_digits: A = digita result += reversible_numbers( remaining_length - 2 , (remainder + digita + digita) // 10 , snake_case__ , snake_case__ , ) return result def _snake_case ( snake_case__ : int = 9 ): A = 0 for length in range(1 , max_power + 1 ): result += reversible_numbers(snake_case__ , 0 , [0] * length , snake_case__ ) return result if __name__ == "__main__": print(F"""{solution() = }""")
720
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = {'''vocab_file''': '''spm_char.model'''} _lowercase = { '''vocab_file''': { '''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''', '''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''', '''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''', } } _lowercase = { '''microsoft/speecht5_asr''': 10_24, '''microsoft/speecht5_tts''': 10_24, '''microsoft/speecht5_vc''': 10_24, } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = VOCAB_FILES_NAMES _lowerCamelCase: List[Any] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase: Tuple = ['''input_ids''', '''attention_mask'''] def __init__( self : List[str] ,A_ : int ,A_ : List[str]="<s>" ,A_ : Optional[Any]="</s>" ,A_ : Optional[Any]="<unk>" ,A_ : str="<pad>" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : List[str] ,) -> None: A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,pad_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,) A = vocab_file A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A_ ) @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: return self.sp_model.get_piece_size() def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : str ) -> Any: A = self.__dict__.copy() A = None return state def __setstate__( self : Optional[int] ,A_ : str ) -> Tuple: A = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): A = {} A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[str]: return self.sp_model.encode(A_ ,out_type=A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any] ) -> Union[str, Any]: return self.sp_model.piece_to_id(A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict ) -> List[Any]: A = self.sp_model.IdToPiece(A_ ) return token def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Optional[Any] ) -> List[str]: A = [] A = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(A_ ) + token A = [] else: current_sub_tokens.append(A_ ) out_string += self.sp_model.decode(A_ ) return out_string.strip() def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : Optional[int]=None ) -> List[int]: if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ ) A = [1] if token_ids_a is None: return ([0] * 
len(A_ )) + suffix_ones return ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(A_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A = os.path.join( A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ ,'wb' ) as fi: A = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,)
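A hedged usage sketch: it assumes the tokenizer above is exposed as `SpeechT5Tokenizer`, its public name in transformers, and that the `microsoft/speecht5_asr` checkpoint listed in the vocab map is reachable.

from transformers import SpeechT5Tokenizer  # assumed public name for the tokenizer above

tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
encoded = tokenizer("hello")  # character-level pieces, with </s> appended as in build_inputs_with_special_tokens
print(encoded.input_ids)
print(tokenizer.decode(encoded.input_ids, skip_special_tokens=True))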
22
0
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = {'''vocab_file''': '''sentencepiece.model'''} _lowercase = { '''vocab_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''', }, } _lowercase = { '''google/rembert''': 2_56, } class __lowercase ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[int] = VOCAB_FILES_NAMES _lowerCamelCase: List[Any] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase: Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Dict ,A_ : List[Any] ,A_ : Any=False ,A_ : List[str]=True ,A_ : Tuple=True ,A_ : Optional[Any]="[CLS]" ,A_ : Any="[SEP]" ,A_ : Any="[UNK]" ,A_ : str="[SEP]" ,A_ : int="[PAD]" ,A_ : Optional[Any]="[CLS]" ,A_ : Dict="[MASK]" ,**A_ : Any ,) -> Any: super().__init__( do_lower_case=A_ ,remove_space=A_ ,keep_accents=A_ ,bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,sep_token=A_ ,pad_token=A_ ,cls_token=A_ ,mask_token=A_ ,**A_ ,) A = do_lower_case A = remove_space A = keep_accents A = vocab_file A = spm.SentencePieceProcessor() self.sp_model.Load(A_ ) @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: return len(self.sp_model ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]: A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Dict ) -> Dict: A = self.__dict__.copy() A = None return state def __setstate__( self : Optional[Any] ,A_ : str ) -> Dict: A = d A = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Optional[Any] ,A_ : Optional[Any]=False ) -> Optional[int]: A = self.sp_model.EncodeAsPieces(A_ ) return pieces def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[Any] ) -> Dict: return self.sp_model.PieceToId(A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Any ) -> Optional[Any]: return self.sp_model.IdToPiece(A_ ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Optional[Any] ) -> Optional[Any]: A = self.sp_model.decode_pieces(A_ ) return out_string def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: A = [self.sep_token_id] A = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(A_ )) + [1] + ([0] * len(A_ )) + [1] return [1] + ([0] * len(A_ )) + [1] def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: A = [self.sep_token_id] A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(A_ ): logger.error('Vocabulary path ({}) should be a directory'.format(A_ ) ) return A = os.path.join( A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ): copyfile(self.vocab_file ,A_ ) return (out_vocab_file,)
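The special-tokens mask assembled above follows the pattern [CLS] sequence_a [SEP] sequence_b [SEP]; a quick illustration of the expected output for two short sequences:

# For a first sequence of length 3 and a second of length 2, added special
# tokens are marked 1 and real tokens 0:
mask = [1] + [0] * 3 + [1] + [0] * 2 + [1]
print(mask)  # [1, 0, 0, 0, 1, 0, 0, 1]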
721
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _lowercase = { '''configuration_clip''': [ '''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPConfig''', '''CLIPOnnxConfig''', '''CLIPTextConfig''', '''CLIPVisionConfig''', ], '''processing_clip''': ['''CLIPProcessor'''], '''tokenization_clip''': ['''CLIPTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''CLIPTokenizerFast'''] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''CLIPFeatureExtractor'''] _lowercase = ['''CLIPImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPModel''', '''CLIPPreTrainedModel''', '''CLIPTextModel''', '''CLIPTextModelWithProjection''', '''CLIPVisionModel''', '''CLIPVisionModelWithProjection''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFCLIPModel''', '''TFCLIPPreTrainedModel''', '''TFCLIPTextModel''', '''TFCLIPVisionModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''FlaxCLIPModel''', '''FlaxCLIPPreTrainedModel''', '''FlaxCLIPTextModel''', '''FlaxCLIPTextPreTrainedModel''', '''FlaxCLIPVisionModel''', '''FlaxCLIPVisionPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''uw-madison/mra-base-512-4''': '''https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json''', } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[int] = '''mra''' def __init__( self : List[Any] ,A_ : Tuple=5_0265 ,A_ : int=768 ,A_ : Dict=12 ,A_ : Union[str, Any]=12 ,A_ : Tuple=3072 ,A_ : Tuple="gelu" ,A_ : Optional[Any]=0.1 ,A_ : Optional[Any]=0.1 ,A_ : Dict=512 ,A_ : Any=1 ,A_ : str=0.02 ,A_ : List[Any]=1e-5 ,A_ : Optional[Any]="absolute" ,A_ : Union[str, Any]=4 ,A_ : Union[str, Any]="full" ,A_ : Union[str, Any]=0 ,A_ : str=0 ,A_ : Optional[Any]=1 ,A_ : str=0 ,A_ : Any=2 ,**A_ : Tuple ,) -> List[str]: super().__init__(pad_token_id=A_ ,bos_token_id=A_ ,eos_token_id=A_ ,**A_ ) A = vocab_size A = max_position_embeddings A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = type_vocab_size A = layer_norm_eps A = position_embedding_type A = block_per_row A = approx_mode A = initial_prior_first_n_blocks A = initial_prior_diagonal_n_blocks
700
"""simple docstring""" import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Any ,A_ : Optional[Any] ,A_ : Tuple=13 ,A_ : Optional[Any]=7 ,A_ : Dict=True ,A_ : Optional[Any]=True ,A_ : str=True ,A_ : Union[str, Any]=True ,A_ : Optional[Any]=True ,A_ : Tuple=False ,A_ : Optional[int]=False ,A_ : str=False ,A_ : int=2 ,A_ : Union[str, Any]=99 ,A_ : int=0 ,A_ : Dict=32 ,A_ : List[str]=5 ,A_ : Any=4 ,A_ : str=0.1 ,A_ : Any=0.1 ,A_ : int=512 ,A_ : List[Any]=2 ,A_ : Union[str, Any]=0.02 ,A_ : Optional[Any]=2 ,A_ : List[str]=4 ,A_ : Optional[int]="last" ,A_ : str=True ,A_ : List[str]=None ,A_ : List[Any]=0 ,) -> int: A = parent A = batch_size A = seq_length A = is_training A = use_input_lengths A = use_token_type_ids A = use_labels A = gelu_activation A = sinusoidal_embeddings A = causal A = asm A = n_langs A = vocab_size A = n_special A = hidden_size A = num_hidden_layers A = num_attention_heads A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_sequence_label_size A = initializer_range A = num_labels A = num_choices A = summary_type A = use_proj A = scope A = bos_token_id def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = random_attention_mask([self.batch_size, self.seq_length] ) A = None if self.use_input_lengths: A = ( ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length A = None if self.use_token_type_ids: A = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A = ids_tensor([self.batch_size] ,2 ).float() A = ids_tensor([self.batch_size] ,self.num_choices ) A = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: return XLMConfig( vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : int ,A_ : Dict ,A_ : Optional[Any] ,A_ : Optional[Any] ,A_ : Any 
,A_ : List[str] ,A_ : Optional[int] ,) -> Tuple: A = XLMModel(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,lengths=A_ ,langs=A_ ) A = model(A_ ,langs=A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ,A_ : Union[str, Any] ,A_ : Any ,A_ : Any ,A_ : Any ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : List[str] ,A_ : List[str] ,) -> Union[str, Any]: A = XLMWithLMHeadModel(A_ ) model.to(A_ ) model.eval() A = model(A_ ,token_type_ids=A_ ,labels=A_ ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple ,A_ : Tuple ,A_ : str ,A_ : int ,A_ : str ,A_ : Optional[Any] ,A_ : Any ,A_ : Any ,A_ : Dict ,) -> List[str]: A = XLMForQuestionAnsweringSimple(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model(A_ ,start_positions=A_ ,end_positions=A_ ) A = outputs self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[int] ,A_ : Dict ,A_ : Optional[Any] ,A_ : List[Any] ,A_ : List[str] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : str ,A_ : Any ,) -> Optional[int]: A = XLMForQuestionAnswering(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model( A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,p_mask=A_ ,) A = model( A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,) ((A) , ) = result_with_labels.to_tuple() A = model(A_ ,start_positions=A_ ,end_positions=A_ ) ((A) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape ,() ) self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : Union[str, Any] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : str ,) -> List[Any]: A = XLMForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model(A_ ,labels=A_ ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : str ,A_ : Tuple ,A_ : List[str] ,A_ : Dict ,A_ : Dict ,A_ : Union[str, Any] ,A_ : Dict ,A_ : Any ,) -> Any: A = self.num_labels A = XLMForTokenClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : str ,A_ : int ,A_ : Tuple ,A_ : List[Any] ,A_ : List[str] ,A_ : List[str] ,A_ : Optional[Any] ,A_ : int ,) -> Tuple: A = self.num_choices A = XLMForMultipleChoice(config=A_ ) model.to(A_ ) model.eval() A = 
input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = model( A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) _lowerCamelCase: Dict = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _lowerCamelCase: Optional[Any] = ( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Tuple ,A_ : Any ,A_ : str ) -> Tuple: if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[int] ,A_ : Tuple ,A_ : str=False ) -> Dict: A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": A = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=A_ ) A = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=A_ ) return inputs_dict def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: A = XLMModelTester(self ) A = ConfigTester(self ,config_class=A_ ,emb_dim=37 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : List[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : Union[str, Any] ,A_ : List[Any]=False ,A_ : Tuple=1 ) -> List[Any]: self.assertIsInstance(A_ ,A_ ) self.assertListEqual( [isinstance(A_ ,A_ ) for iter_attentions in attentions] ,[True] * len(A_ ) ) self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(A_ ): # adds PAD dummy token A = min_length + idx + 1 A = min_length + idx + 1 A = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : Dict ,A_ : Optional[Any] ,A_ : Dict ,A_ : Tuple=False ,A_ : Optional[Any]=1 ) -> List[str]: self.assertIsInstance(A_ ,A_ ) self.assertListEqual( [isinstance(A_ ,A_ ) for iter_hidden_states in hidden_states] ,[True] * len(A_ ) ,) self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(A_ ): # adds PAD dummy token A = min_length + idx + 1 A = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(A_ ) ,) pass @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = XLMModel.from_pretrained(A_ ) 
self.assertIsNotNone(A)


@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # "the president" repeated ten times
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why.
        # Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
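A minimal, self-contained sketch of what the slow integration test above exercises through the public API. The checkpoint name "xlm-mlm-en-2048" is real and taken from the test; pairing it with XLMTokenizer for decoding is an assumption added here for readability, not part of the test.

import torch
from transformers import XLMTokenizer, XLMWithLMHeadModel

tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")  # assumed matching tokenizer
model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")

input_ids = torch.tensor([[14, 447]])  # "the president"
output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding, as in the test
# With the default generation length this repeats "the president" ten times,
# exactly the reference sequence asserted in the test above.
print(tokenizer.decode(output_ids[0]))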
"""simple docstring""" from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[Any] ): A = [] for part_id in partition_order: A = df.where(F'SPARK_PARTITION_ID() = {part_id}' ).collect() for row_idx, row in enumerate(snake_case__ ): expected_row_ids_and_row_dicts.append((F'{part_id}_{row_idx}', row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def _snake_case ( ): A = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() A = spark.range(100 ).repartition(1 ) A = Spark(snake_case__ ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def _snake_case ( ): A = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() A = spark.range(10 ).repartition(2 ) A = [1, 0] A = _generate_iterable_examples(snake_case__ , snake_case__ ) # Reverse the partitions. A = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ , snake_case__ ) for i, (row_id, row_dict) in enumerate(generate_fn() ): A , A = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def _snake_case ( ): A = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() A = spark.range(10 ).repartition(1 ) A = SparkExamplesIterable(snake_case__ ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(snake_case__ ): assert row_id == F'0_{i}' assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def _snake_case ( ): A = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() A = spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch('numpy.random.Generator' ) as generator_mock: A = lambda snake_case__ : x.reverse() A = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ , [2, 1, 0] ) A = SparkExamplesIterable(snake_case__ ).shuffle_data_sources(snake_case__ ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(snake_case__ ): A , A = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def _snake_case ( ): A = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() A = spark.range(20 ).repartition(4 ) # Partitions 0 and 2 A = SparkExamplesIterable(snake_case__ ).shard_data_sources(worker_id=0 , num_workers=2 ) assert shard_it_a.n_shards == 2 A = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ , [0, 2] ) for i, (row_id, row_dict) in enumerate(snake_case__ ): A , A = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 A = SparkExamplesIterable(snake_case__ ).shard_data_sources(worker_id=1 , num_workers=2 ) assert shard_it_a.n_shards == 2 A = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ , [1, 3] ) for i, (row_id, row_dict) in enumerate(snake_case__ ): A , A = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def _snake_case ( ): A = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() A = spark.range(100 ).repartition(1 ) A = Spark(snake_case__ ) # Choose a small max_shard_size for maximum partitioning. spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 100
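The round-robin mapping of Spark partitions to workers that the sharding test asserts can also be seen directly. This sketch reuses only the calls that already appear in the tests above; the session name "demo" is an arbitrary choice.

import pyspark
from datasets.packaged_modules.spark.spark import SparkExamplesIterable

spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("demo").getOrCreate()
df = spark.range(20).repartition(4)

# With 4 partitions and 2 workers, shard_data_sources deals partitions out
# round-robin: worker 0 reads partitions [0, 2], worker 1 reads [1, 3].
for worker_id in (0, 1):
    shard = SparkExamplesIterable(df).shard_data_sources(worker_id=worker_id, num_workers=2)
    print(f"worker {worker_id} sees {shard.n_shards} of 4 partitions")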
"""simple docstring""" from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput _lowercase = 8 def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int]=BITS ): A = x.device A = (x * 255).int().clamp(0 , 255 ) A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ ) A = rearrange(snake_case__ , 'd -> d 1 1' ) A = rearrange(snake_case__ , 'b c h w -> b c 1 h w' ) A = ((x & mask) != 0).float() A = rearrange(snake_case__ , 'b c d h w -> b (c d) h w' ) A = bits * 2 - 1 return bits def _snake_case ( snake_case__ : Any , snake_case__ : Any=BITS ): A = x.device A = (x > 0).int() A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ , dtype=torch.intaa ) A = rearrange(snake_case__ , 'd -> d 1 1' ) A = rearrange(snake_case__ , 'b (c d) h w -> b c d h w' , d=8 ) A = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' ) return (dec / 255).clamp(0.0 , 1.0 ) def _snake_case ( self : Optional[int] , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : float = 0.0 , snake_case__ : bool = True , snake_case__ : List[str]=None , snake_case__ : bool = True , ): if self.num_inference_steps is None: raise ValueError( 'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) A = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas A = self.alphas_cumprod[timestep] A = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod A = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" A = self.bit_scale if self.config.clip_sample: A = torch.clamp(snake_case__ , -scale , snake_case__ ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) A = self._get_variance(snake_case__ , snake_case__ ) A = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide A = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf A = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf A = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 A = model_output.device if torch.is_tensor(snake_case__ ) else 'cpu' A = torch.randn(model_output.shape , dtype=model_output.dtype , generator=snake_case__ ).to(snake_case__ ) A = self._get_variance(snake_case__ , snake_case__ ) ** 0.5 * eta * noise A = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ ) def _snake_case ( self : Dict , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : Tuple="epsilon" , snake_case__ : List[str]=None , snake_case__ : bool = True , ): A = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: A , A = torch.split(snake_case__ , sample.shape[1] , dim=1 ) else: A = None # 1. compute alphas, betas A = self.alphas_cumprod[t] A = self.alphas_cumprod[t - 1] if t > 0 else self.one A = 1 - alpha_prod_t A = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": A = model_output else: raise ValueError(F'Unsupported prediction_type {prediction_type}.' ) # 3. Clip "predicted x_0" A = self.bit_scale if self.config.clip_sample: A = torch.clamp(snake_case__ , -scale , snake_case__ ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t A = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise A = 0 if t > 0: A = torch.randn( model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=snake_case__ ).to(model_output.device ) A = (self._get_variance(snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise A = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Union[str, Any] ,A_ : UNetaDConditionModel ,A_ : Union[DDIMScheduler, DDPMScheduler] ,A_ : Optional[float] = 1.0 ,) -> Optional[int]: super().__init__() A = bit_scale A = ( ddim_bit_scheduler_step if isinstance(A_ ,A_ ) else ddpm_bit_scheduler_step ) self.register_modules(unet=A_ ,scheduler=A_ ) @torch.no_grad() def __call__( self : Tuple ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 50 ,A_ : Optional[torch.Generator] = None ,A_ : Optional[int] = 1 ,A_ : Optional[str] = "pil" ,A_ : bool = True ,**A_ : Optional[Any] ,) -> Union[Tuple, ImagePipelineOutput]: A = torch.randn( (batch_size, self.unet.config.in_channels, height, width) ,generator=A_ ,) A = decimal_to_bits(A_ ) * self.bit_scale A = latents.to(self.device ) self.scheduler.set_timesteps(A_ ) for t in self.progress_bar(self.scheduler.timesteps ): # predict the noise residual A = self.unet(A_ ,A_ ).sample # compute the previous noisy sample x_t -> x_t-1 A = self.scheduler.step(A_ ,A_ ,A_ ).prev_sample A = bits_to_decimal(A_ ) if output_type == "pil": A = self.numpy_to_pil(A_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=A_ )
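A quick sanity check (not part of the pipeline) that the two bit codecs above invert each other, up to the 1/255 quantization error introduced by the 8-bit cast:

import torch

x = torch.rand(2, 3, 16, 16)   # image tensor in [0, 1]
bits = decimal_to_bits(x)      # shape (2, 3 * 8, 16, 16), values in {-1.0, 1.0}
x_rec = bits_to_decimal(bits)  # back to [0, 1]

assert bits.shape == (2, 24, 16, 16)
assert torch.allclose(x, x_rec, atol=1 / 255)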
"""simple docstring""" import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers _lowercase = float('''nan''') class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Any ,A_ : int ) -> Optional[int]: A = sys.stdout A = open(A_ ,'a' ) def __getattr__( self : Union[str, Any] ,A_ : int ) -> List[str]: return getattr(self.stdout ,A_ ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[Any] ) -> Optional[Any]: self.stdout.write(A_ ) # strip tqdm codes self.file.write(re.sub(R'^.*\r' ,'' ,A_ ,0 ,re.M ) ) def _snake_case ( snake_case__ : List[Any]=80 , snake_case__ : Dict=False ): A = [] # deal with critical env vars A = ['CUDA_VISIBLE_DEVICES'] for key in env_keys: A = os.environ.get(snake_case__ , snake_case__ ) if val is not None: cmd.append(F'{key}={val}' ) # python executable (not always needed if the script is executable) A = sys.executable if full_python_path else sys.executable.split('/' )[-1] cmd.append(snake_case__ ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes A = [] A = '' while len(snake_case__ ) > 0: current_line += F'{cmd.pop(0 )} ' if len(snake_case__ ) == 0 or len(snake_case__ ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(snake_case__ ) A = '' return "\\\n".join(snake_case__ ) def _snake_case ( snake_case__ : List[Any] , snake_case__ : Optional[Any] ): # unwrap multi-line input A = re.sub(r'[\\\n]+' , ' ' , args.base_cmd ) # remove --output_dir if any and set our own A = re.sub('--output_dir\s+[^\s]+' , '' , args.base_cmd ) args.base_cmd += F' --output_dir {output_dir}' # ensure we have --overwrite_output_dir A = re.sub('--overwrite_output_dir\s+' , '' , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def _snake_case ( snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] ): # Enable to debug everything but the run itself, to do it fast and see the progress. 
# This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222] )} , ) A = subprocess.run(snake_case__ , capture_output=snake_case__ , text=snake_case__ ) if verbose: print('STDOUT' , result.stdout ) print('STDERR' , result.stderr ) # save the streams A = variation.replace(' ' , '-' ) with open(Path(snake_case__ ) / F'log.{prefix}.stdout.txt' , 'w' ) as f: f.write(result.stdout ) with open(Path(snake_case__ ) / F'log.{prefix}.stderr.txt' , 'w' ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print('failed' ) return {target_metric_key: nan} with io.open(F'{output_dir}/all_results.json' , 'r' , encoding='utf-8' ) as f: A = json.load(snake_case__ ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def _snake_case ( snake_case__ : Tuple , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : str , snake_case__ : List[str] , snake_case__ : int , ): A = [] A = [] A = F'{id}: {variation:<{longest_variation_len}}' A = F'{preamble}: ' A = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(snake_case__ ) , desc=snake_case__ , leave=snake_case__ ): A = process_run_single( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) A = single_run_metrics[target_metric_key] if not math.isnan(snake_case__ ): metrics.append(snake_case__ ) results.append(snake_case__ ) outcome += "✓" else: outcome += "✘" A = F'\33[2K\r{outcome}' if len(snake_case__ ) > 0: A = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} A = round(mean_metrics[target_metric_key] , 2 ) A = F'{outcome} {mean_target}' if len(snake_case__ ) > 1: results_str += F' {tuple(round(snake_case__ , 2 ) for x in results )}' print(snake_case__ ) A = variation return mean_metrics else: print(snake_case__ ) return {variation_key: variation, target_metric_key: nan} def _snake_case ( ): A = torch.cuda.get_device_properties(torch.device('cuda' ) ) return F'\nDatetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n' def _snake_case ( snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int , snake_case__ : Dict , snake_case__ : List[Any] ): A = pd.DataFrame(snake_case__ ) A = 'variation' A = 'diff_%' A = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan A = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(snake_case__ ): # as a fallback, use the minimal value as the sentinel A = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(snake_case__ ): A = df.apply( lambda snake_case__ : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis='columns' , ) # re-order columns A = [variation_key, target_metric_key, diff_key, *report_metric_keys] A = 
df.reindex(snake_case__ , axis='columns' ) # reorder cols # capitalize A = df.rename(str.capitalize , axis='columns' ) # make the cols as narrow as possible A = df.rename(lambda snake_case__ : c.replace('_' , '<br>' ) , axis='columns' ) A = df.rename(lambda snake_case__ : c.replace('_' , '\n' ) , axis='columns' ) A = ['', 'Copy between the cut-here-lines and paste as is to github or a forum'] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=snake_case__ , floatfmt='.2f' )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=snake_case__ , floatfmt='.2f' )] print('\n\n'.join(snake_case__ ) ) def _snake_case ( ): A = argparse.ArgumentParser() parser.add_argument( '--base-cmd' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='Base cmd' , ) parser.add_argument( '--variations' , default=snake_case__ , type=snake_case__ , nargs='+' , required=snake_case__ , help='Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'' , ) parser.add_argument( '--base-variation' , default=snake_case__ , type=snake_case__ , help='Baseline variation to compare to. if None the minimal target value will be used to compare against' , ) parser.add_argument( '--target-metric-key' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='Target metric key in output_dir/all_results.json, e.g., train_samples_per_second' , ) parser.add_argument( '--report-metric-keys' , default='' , type=snake_case__ , help='Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., \'train_loss train_samples' , ) parser.add_argument( '--repeat-times' , default=1 , type=snake_case__ , help='How many times to re-run each variation - an average will be reported' , ) parser.add_argument( '--output_dir' , default='output_benchmark' , type=snake_case__ , help='The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked' , ) parser.add_argument( '--verbose' , default=snake_case__ , action='store_true' , help='Whether to show the outputs of each run or just the benchmark progress' , ) A = parser.parse_args() A = args.output_dir Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) A = get_base_command(snake_case__ , snake_case__ ) # split each dimension into its --foo variations A = [list(map(str.strip , re.split(r'\|' , snake_case__ ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty A = list(map(str.strip , map(' '.join , itertools.product(*snake_case__ ) ) ) ) A = max(len(snake_case__ ) for x in variations ) # split wanted keys A = args.report_metric_keys.split() # capture prints into a log file for convenience A = F'benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt' print(F'\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt' ) print(F'and this script\'s output is also piped into {report_fn}' ) A = Tee(snake_case__ ) print(F'\n*** Running {len(snake_case__ )} benchmarks:' ) print(F'Base command: {" ".join(snake_case__ )}' ) A = 'variation' A = [] for id, variation in enumerate(tqdm(snake_case__ , desc='Total completion: ' , leave=snake_case__ ) ): A = base_cmd + variation.split() results.append( process_run( id + 1 , snake_case__ , snake_case__ , snake_case__ , snake_case__ , args.target_metric_key , snake_case__ , args.repeat_times , snake_case__ , args.verbose , ) ) process_results(snake_case__ , args.target_metric_key , snake_case__ , args.base_variation , snake_case__ ) if __name__ == "__main__": main()
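For reference, a hedged example of how the benchmark script above is meant to be invoked. The flag names are exactly the ones registered in `main()`; the `benchmark.py` filename and the training script passed to `--base-cmd` are only plausible placeholders.

# Hypothetical invocation (the run_translation.py path is an assumption):
#
#   python benchmark.py \
#       --base-cmd 'examples/pytorch/translation/run_translation.py --model_name_or_path t5-small' \
#       --variations '|--fp16|--bf16' '|--tf32' \
#       --target-metric-key train_samples_per_second \
#       --report-metric-keys 'train_loss train_samples' \
#       --repeat-times 2 \
#       --output_dir output_benchmark
#
# Each --variations argument is one '|'-separated dimension; the script
# benchmarks the cartesian product of all dimensions, averages each cell over
# --repeat-times runs, and prints a markdown table of the target metric.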
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = '''yolos''' def __init__( self : Dict ,A_ : Optional[Any]=768 ,A_ : int=12 ,A_ : List[str]=12 ,A_ : str=3072 ,A_ : Tuple="gelu" ,A_ : Dict=0.0 ,A_ : List[Any]=0.0 ,A_ : Any=0.02 ,A_ : str=1e-12 ,A_ : List[Any]=[512, 864] ,A_ : Union[str, Any]=16 ,A_ : List[str]=3 ,A_ : Optional[int]=True ,A_ : Tuple=100 ,A_ : str=True ,A_ : Optional[Any]=False ,A_ : Any=1 ,A_ : Optional[Any]=5 ,A_ : Optional[Any]=2 ,A_ : Optional[int]=5 ,A_ : List[Any]=2 ,A_ : Union[str, Any]=0.1 ,**A_ : Tuple ,) -> Any: super().__init__(**A_ ) A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = patch_size A = num_channels A = qkv_bias A = num_detection_tokens A = use_mid_position_embeddings A = auxiliary_loss # Hungarian matcher A = class_cost A = bbox_cost A = giou_cost # Loss coefficients A = bbox_loss_coefficient A = giou_loss_coefficient A = eos_coefficient class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Any = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> float: return 1e-4 @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: return 12