Dataset schema (per row):

    column                    type      range
    code                      string    length 81 - 54k characters
    code_codestyle            int64     0 - 721
    style_context             string    length 91 - 41.9k characters
    style_context_codestyle   int64     0 - 699
    label                     int64     0 - 1
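A row can be inspected with the 🤗 Datasets library; a minimal sketch, assuming the data is published as a Hub dataset (the repo id "user/code-style-pairs" is a placeholder, not this dataset's real name):

from datasets import load_dataset

# "user/code-style-pairs" is a hypothetical repo id; substitute the real one.
ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
print(row["code"][:80])             # obfuscated source string (81 - 54k chars)
print(row["code_codestyle"])        # integer style id in 0 - 721
print(row["style_context"][:80])    # second source string (91 - 41.9k chars)
print(row["label"])                 # binary label, 0 or 1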
import os

# Precompute a list of the first 100 triangular numbers t(n) = n(n + 1) / 2
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    # Resolve words.txt relative to this script
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    # Keep the words whose letter-value sum (A=1, ..., Z=26) is triangular
    triangular_words = [
        word_value
        for word_value in [sum(ord(x) - 64 for x in word) for word in words]
        if word_value in TRIANGULAR_NUMBERS
    ]
    return len(triangular_words)


if __name__ == "__main__":
    print(solution())
713
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess


if __name__ == "__main__":
    # Find root of trigonometric function (value of pi)
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find fourth root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 - 5', 0.4 + 5j)}")
    # Find value of e
    print("The root of log(y) - 1 = 0 is", f"{newton_raphson('log(y) - 1', 2, variable='y')}")
    # Exponential roots
    print("The root of exp(x) - 1 = 0 is", f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}")
    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
17
0
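As an aside on the triangular-number precompute in the row above: membership can also be tested in closed form, since t = n(n + 1)/2 exactly when 8t + 1 is a perfect square. A small sketch, not part of the row:

import math

def is_triangular(t: int) -> bool:
    # t = n(n + 1) / 2  <=>  8t + 1 = (2n + 1)^2
    root = math.isqrt(8 * t + 1)
    return root * root == 8 * t + 1

assert all(is_triangular(t) for t in (1, 3, 6, 10, 55))
assert not is_triangular(2)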
import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class lowerCamelCase_ : '''simple docstring''' def __init__( self : List[str] , __lowerCamelCase : Any , __lowerCamelCase : List[Any]=13 , __lowerCamelCase : List[str]=30 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : List[Any]=5 , __lowerCamelCase : Any=4 , __lowerCamelCase : List[str]=37 , __lowerCamelCase : str="gelu" , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Optional[int]=10 , __lowerCamelCase : Any=0.02 , __lowerCamelCase : Union[str, Any]=3 , __lowerCamelCase : List[Any]=0.6 , __lowerCamelCase : Any=None , ) -> int: A : Dict = parent A : Optional[int] = batch_size A : List[str] = image_size A : Dict = patch_size A : Dict = num_channels A : Dict = is_training A : str = use_labels A : Optional[int] = hidden_size A : Tuple = num_hidden_layers A : Union[str, Any] = num_attention_heads A : Optional[Any] = intermediate_size A : Dict = hidden_act A : str = hidden_dropout_prob A : int = attention_probs_dropout_prob A : Dict = type_sequence_label_size A : List[Any] = initializer_range A : int = mask_ratio A : Dict = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) A : List[str] = (image_size // patch_size) ** 2 A : List[str] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Tuple: A : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Union[str, Any] = None if self.use_labels: A : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Dict = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]: return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] ) -> int: A : Optional[int] = ViTMAEModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A : Optional[Any] = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , 
(self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : int ) -> Optional[Any]: A : str = ViTMAEForPreTraining(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A : List[str] = model(__UpperCamelCase ) A : List[Any] = (self.image_size // self.patch_size) ** 2 A : Tuple = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images A : Union[str, Any] = 1 A : Union[str, Any] = ViTMAEForPreTraining(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : List[Any] = model(__UpperCamelCase ) A : Union[str, Any] = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any: A : Optional[int] = self.prepare_config_and_inputs() A , A , A : Optional[int] = config_and_inputs A : Union[str, Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () a__ = {"feature-extraction": ViTMAEModel} if is_torch_available() else {} a__ = False a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: A : str = ViTMAEModelTester(self ) A : List[str] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict: self.config_tester.run_common_tests() @unittest.skip(reason="ViTMAE does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple: pass def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Tuple: A , A : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Optional[int] = model_class(__UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: A , A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : int = model_class(__UpperCamelCase ) A : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Dict = [*signature.parameters.keys()] A : List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]: A : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__UpperCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] ) -> int: # make masks reproducible np.random.seed(2 ) A : Optional[int] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 ) A : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, 
num_patches) ) A : Union[str, Any] = torch.from_numpy(__UpperCamelCase ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument A : Dict = pt_noise super().check_pt_tf_models(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]: A , A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : int = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): A : Any = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) A : Optional[Any] = outputs[0].cpu().numpy() A : Tuple = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__UpperCamelCase ) A : int = model_class.from_pretrained(__UpperCamelCase ) model.to(__UpperCamelCase ) # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): A : List[Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) # Make sure we don't have nans A : Optional[int] = after_outputs[0].cpu().numpy() A : Dict = 0 A : Any = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__UpperCamelCase , 1e-5 ) @unittest.skip( reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]: pass @unittest.skip( reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: pass @unittest.skip( reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: pass @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." 
) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]: pass @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]: for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : int = ViTMAEModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def UpperCAmelCase ( ): A : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Tuple: return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: # make random mask reproducible across the PT and TF model np.random.seed(2 ) A : Optional[Any] = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__UpperCamelCase ) A : Union[str, Any] = self.default_image_processor A : int = prepare_img() A : Any = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) A : Any = ViTMAEConfig() A : List[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) A : Optional[Any] = np.random.uniform(size=(1, num_patches) ) # forward pass with torch.no_grad(): A : Optional[Any] = model(**__UpperCamelCase , noise=torch.from_numpy(__UpperCamelCase ).to(device=__UpperCamelCase ) ) # verify the logits A : int = torch.Size((1, 1_96, 7_68) ) self.assertEqual(outputs.logits.shape , __UpperCamelCase ) A : Any = torch.tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__UpperCamelCase ) , atol=1e-4 ) )
714
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} __SCREAMING_SNAKE_CASE = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } __SCREAMING_SNAKE_CASE = { """allenai/led-base-16384""": 16384, } class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = LEDTokenizer a__ = ["input_ids", "attention_mask"] def __init__( self : int , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : str="replace" , __lowerCamelCase : Optional[int]="<s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Dict="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : Union[str, Any]="<pad>" , __lowerCamelCase : Dict="<mask>" , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : str=True , **__lowerCamelCase : Union[str, Any] , ) -> Optional[int]: super().__init__( __lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase , **__lowerCamelCase , ) A : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space: A : Any = getattr(__lowerCamelCase , pre_tok_state.pop("type" ) ) A : Any = add_prefix_space A : Tuple = pre_tok_class(**__lowerCamelCase ) A : str = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` A : List[str] = "post_processor" A : Union[str, Any] = getattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) if tokenizer_component_instance: A : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A : Union[str, Any] = tuple(state["sep"] ) if "cls" in state: A : str = tuple(state["cls"] ) A : int = False if state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space: A : List[Any] = add_prefix_space A : Dict = True if state.get("trim_offsets" , __lowerCamelCase ) != trim_offsets: A : Dict = trim_offsets A : str = True if changes_to_apply: A : int = getattr(__lowerCamelCase , state.pop("type" ) ) A : Dict = component_class(**__lowerCamelCase ) setattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) @property # Copied from 
transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Any ) -> Dict: A : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else value A : Tuple = value def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[str] ) -> BatchEncoding: A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , *__lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ) -> BatchEncoding: A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]: A : Optional[Any] = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase ) return tuple(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any]=None ) -> List[str]: A : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: A : str = [self.sep_token_id] A : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ) -> dict: A : Dict = super()._pad( encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: A : List[Any] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: A : Optional[int] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
A : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase ) if needs_to_be_padded: A : Any = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` A : Tuple = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": A : Tuple = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
17
0
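The tokenizer subclass in the second cell of the row above pads a global_attention_mask alongside the regular inputs; a minimal usage sketch with the public LED checkpoint (illustrative only, assuming a recent transformers release):

import torch
from transformers import LEDTokenizerFast

tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
enc = tokenizer(["a long document ..."], return_tensors="pt", padding=True)
# LED models expect a global_attention_mask; by convention the first
# (<s>) token is given global attention.
enc["global_attention_mask"] = torch.zeros_like(enc["input_ids"])
enc["global_attention_mask"][:, 0] = 1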
'''simple docstring''' import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = """ybelkada/fonts""" def UpperCAmelCase ( ): if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( f"""You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use """ "Pix2StructImageProcessor. Please upgrade torch." ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): requires_backends(_UpperCamelCase , ["torch"] ) _check_torch_version() A : List[str] = image_tensor.unsqueeze(0 ) A : Dict = torch.nn.functional.unfold(_UpperCamelCase , (patch_height, patch_width) , stride=(patch_height, patch_width) ) A : Dict = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , _UpperCamelCase , _UpperCamelCase , -1 ) A : Union[str, Any] = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape( image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , ) return patches.unsqueeze(0 ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase = 36 , _lowerCamelCase = "black" , _lowerCamelCase = "white" , _lowerCamelCase = 5 , _lowerCamelCase = 5 , _lowerCamelCase = 5 , _lowerCamelCase = 5 , _lowerCamelCase = None , _lowerCamelCase = None , ): requires_backends(_UpperCamelCase , "vision" ) # Add new lines so that each line is no more than 80 characters. A : List[str] = textwrap.TextWrapper(width=80 ) A : Union[str, Any] = wrapper.wrap(text=_UpperCamelCase ) A : Optional[Any] = "\n".join(_UpperCamelCase ) if font_bytes is not None and font_path is None: A : Optional[int] = io.BytesIO(_UpperCamelCase ) elif font_path is not None: A : Union[str, Any] = font_path else: A : Dict = hf_hub_download(_UpperCamelCase , "Arial.TTF" ) A : Dict = ImageFont.truetype(_UpperCamelCase , encoding="UTF-8" , size=_UpperCamelCase ) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. A : Tuple = ImageDraw.Draw(Image.new("RGB" , (1, 1) , _UpperCamelCase ) ) A , A , A , A : List[str] = temp_draw.textbbox((0, 0) , _UpperCamelCase , _UpperCamelCase ) # Create the actual image with a bit of padding around the text. 
A : Optional[int] = text_width + left_padding + right_padding A : int = text_height + top_padding + bottom_padding A : int = Image.new("RGB" , (image_width, image_height) , _UpperCamelCase ) A : Tuple = ImageDraw.Draw(_UpperCamelCase ) draw.text(xy=(left_padding, top_padding) , text=_UpperCamelCase , fill=_UpperCamelCase , font=_UpperCamelCase ) return image def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ): requires_backends(_UpperCamelCase , "vision" ) # Convert to PIL image if necessary A : Any = to_pil_image(_UpperCamelCase ) A : Union[str, Any] = render_text(_UpperCamelCase , **_UpperCamelCase ) A : Optional[Any] = max(header_image.width , image.width ) A : List[str] = int(image.height * (new_width / image.width) ) A : Union[str, Any] = int(header_image.height * (new_width / header_image.width) ) A : str = Image.new("RGB" , (new_width, new_height + new_header_height) , "white" ) new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) ) new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) ) # Convert back to the original framework if necessary A : Tuple = to_numpy_array(_UpperCamelCase ) if infer_channel_dimension_format(_UpperCamelCase ) == ChannelDimension.LAST: A : Optional[Any] = to_channel_dimension_format(_UpperCamelCase , ChannelDimension.LAST ) return new_image class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = ["""flattened_patches"""] def __init__( self : Union[str, Any] , __lowerCamelCase : Dict = True , __lowerCamelCase : Any = True , __lowerCamelCase : int = None , __lowerCamelCase : List[Any] = 20_48 , __lowerCamelCase : Optional[Any] = False , **__lowerCamelCase : Any , ) -> List[Any]: super().__init__(**__lowerCamelCase ) A : Dict = patch_size if patch_size is not None else {"height": 16, "width": 16} A : Optional[int] = do_normalize A : int = do_convert_rgb A : str = max_patches A : Optional[Any] = is_vqa def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , **__lowerCamelCase : int ) -> Optional[Any]: requires_backends(self.extract_flattened_patches , "torch" ) _check_torch_version() # convert to torch A : Optional[Any] = to_channel_dimension_format(__lowerCamelCase , ChannelDimension.FIRST ) A : Any = torch.from_numpy(__lowerCamelCase ) A , A : Optional[int] = patch_size["height"], patch_size["width"] A , A : int = get_image_size(__lowerCamelCase ) # maximize scale s.t. 
A : Union[str, Any] = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) ) A : Dict = max(min(math.floor(scale * image_height / patch_height ) , __lowerCamelCase ) , 1 ) A : str = max(min(math.floor(scale * image_width / patch_width ) , __lowerCamelCase ) , 1 ) A : str = max(num_feasible_rows * patch_height , 1 ) A : Dict = max(num_feasible_cols * patch_width , 1 ) A : Optional[Any] = torch.nn.functional.interpolate( image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="bilinear" , align_corners=__lowerCamelCase , antialias=__lowerCamelCase , ).squeeze(0 ) # [1, rows, columns, patch_height * patch_width * image_channels] A : Optional[int] = torch_extract_patches(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) A : int = patches.shape A : int = patches_shape[1] A : List[Any] = patches_shape[2] A : Tuple = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] A : Any = patches.reshape([rows * columns, depth] ) # [rows * columns, 1] A : Any = torch.arange(__lowerCamelCase ).reshape([rows, 1] ).repeat(1 , __lowerCamelCase ).reshape([rows * columns, 1] ) A : List[Any] = torch.arange(__lowerCamelCase ).reshape([1, columns] ).repeat(__lowerCamelCase , 1 ).reshape([rows * columns, 1] ) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. # [rows * columns, 1] A : List[str] = row_ids.to(torch.floataa ) A : str = col_ids.to(torch.floataa ) # [rows * columns, 2 + patch_height * patch_width * image_channels] A : int = torch.cat([row_ids, col_ids, patches] , -1 ) # [max_patches, 2 + patch_height * patch_width * image_channels] A : str = torch.nn.functional.pad(__lowerCamelCase , [0, 0, 0, max_patches - (rows * columns)] ).float() A : List[str] = to_numpy_array(__lowerCamelCase ) return result def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int = None , **__lowerCamelCase : str ) -> Tuple: if image.dtype == np.uinta: A : List[Any] = image.astype(np.floataa ) # take mean across the whole `image` A : List[Any] = np.mean(__lowerCamelCase ) A : Optional[int] = np.std(__lowerCamelCase ) A : Optional[int] = max(__lowerCamelCase , 1.0 / math.sqrt(np.prod(image.shape ) ) ) return normalize(__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Any = None , __lowerCamelCase : Tuple = None , __lowerCamelCase : List[Any] = None , __lowerCamelCase : Tuple = None , __lowerCamelCase : Optional[Any] = None , __lowerCamelCase : Union[str, Any] = None , __lowerCamelCase : Union[str, Any] = ChannelDimension.FIRST , **__lowerCamelCase : Optional[Any] , ) -> List[Any]: A : List[str] = do_normalize if do_normalize is not None else self.do_normalize A : Any = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb A : List[Any] = patch_size if patch_size is not None else self.patch_size A : Dict = max_patches if max_patches is not None else self.max_patches A : Optional[int] = self.is_vqa if kwargs.get("data_format" , __lowerCamelCase ) is not None: raise ValueError("data_format is not an accepted input as the outputs are " ) A : Optional[Any] = make_list_of_images(__lowerCamelCase ) if not valid_images(__lowerCamelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." 
) # PIL RGBA images are converted to RGB if do_convert_rgb: A : int = [convert_to_rgb(__lowerCamelCase ) for image in images] # All transformations expect numpy arrays. A : Optional[int] = [to_numpy_array(__lowerCamelCase ) for image in images] if is_vqa: if header_text is None: raise ValueError("A header text must be provided for VQA models." ) A : List[str] = kwargs.pop("font_bytes" , __lowerCamelCase ) A : str = kwargs.pop("font_path" , __lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): A : List[Any] = [header_text] * len(__lowerCamelCase ) A : Tuple = [ render_header(__lowerCamelCase , header_text[i] , font_bytes=__lowerCamelCase , font_path=__lowerCamelCase ) for i, image in enumerate(__lowerCamelCase ) ] if do_normalize: A : int = [self.normalize(image=__lowerCamelCase ) for image in images] # convert to torch tensor and permute A : int = [ self.extract_flattened_patches(image=__lowerCamelCase , max_patches=__lowerCamelCase , patch_size=__lowerCamelCase ) for image in images ] # create attention mask in numpy A : Optional[int] = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images] A : int = BatchFeature( data={"flattened_patches": images, "attention_mask": attention_masks} , tensor_type=__lowerCamelCase ) return encoded_outputs
715
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output
    # for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
17
0
from typing import List, Optional, TypeVar

from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal

logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                f"is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} "
                f"(at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                f"is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} "
                f"(at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
716
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class lowerCamelCase_ : '''simple docstring''' def __init__( self : int , __lowerCamelCase : Any , __lowerCamelCase : Dict=3 , __lowerCamelCase : Dict=32 , __lowerCamelCase : Any=3 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : str=[8, 16, 32, 64] , __lowerCamelCase : Dict=[1, 1, 2, 1] , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : str=3 , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase : Tuple=[2, 3, 4] , __lowerCamelCase : Any=1 , ) -> int: A : Optional[int] = parent A : List[str] = batch_size A : Tuple = image_size A : List[str] = num_channels A : List[str] = embeddings_size A : List[str] = hidden_sizes A : str = depths A : Optional[Any] = is_training A : int = use_labels A : Optional[int] = hidden_act A : List[Any] = num_labels A : List[str] = scope A : str = len(__lowerCamelCase ) A : Optional[int] = out_features A : str = out_indices A : Optional[int] = num_groups def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: A : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Optional[int] = None if self.use_labels: A : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels ) A : Tuple = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ) -> Optional[int]: A : Any = BitModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : List[Any] = model(__lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : Dict ) -> Tuple: A : Union[str, Any] = self.num_labels A : List[str] = BitForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : str = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ) -> 
List[Any]: A : Dict = BitBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Optional[Any] = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None A : Optional[Any] = None A : Optional[int] = BitBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Any = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Dict: A : List[str] = self.prepare_config_and_inputs() A , A , A : Tuple = config_and_inputs A : Tuple = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () a__ = ( {"feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: A : Any = BitModelTester(self ) A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: return @unittest.skip(reason="Bit does not output attentions" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: pass @unittest.skip(reason="Bit does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]: pass @unittest.skip(reason="Bit does not support input and output embeddings" ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]: pass def SCREAMING_SNAKE_CASE__ ( self : int ) -> str: A , A : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Dict = model_class(__lowerCamelCase ) A : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Optional[Any] = [*signature.parameters.keys()] A : List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]: A : int = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]: A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Optional[int] = model_class(config=__lowerCamelCase ) for name, module in model.named_modules(): if isinstance(__lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: def check_hidden_states_output(__lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] ): A : Dict = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): A : List[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) A : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A : List[Any] = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() A : Dict = ["preactivation", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: A : Dict = layer_type A : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) @unittest.skip(reason="Bit does not use feedforward chunking" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: pass def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]: A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]: for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Optional[Any] = BitModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]: return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: A : Union[str, Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCamelCase ) A : List[Any] = self.default_image_processor A : List[Any] = prepare_img() A : Tuple = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): A : Union[str, Any] = model(**__lowerCamelCase ) # verify the logits A : str = torch.Size((1, 
10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : Optional[Any] = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) @require_torch class lowerCamelCase_ ( _A ,unittest.TestCase ): '''simple docstring''' a__ = (BitBackbone,) if is_torch_available() else () a__ = BitConfig a__ = False def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: A : Union[str, Any] = BitModelTester(self )
17
0
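The two helpers in the first cell of the row above mirror the public interleave_datasets / concatenate_datasets entry points of the 🤗 Datasets library; a minimal usage sketch, assuming a recent datasets release:

from datasets import Dataset, concatenate_datasets, interleave_datasets

d1 = Dataset.from_dict({"text": ["a", "b"]})
d2 = Dataset.from_dict({"text": ["c", "d", "e"]})

# Sample rows from the two sources until every source has been fully seen
mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42,
                            stopping_strategy="all_exhausted")
# Stack the rows of both sources end to end
combined = concatenate_datasets([d1, d2])
print(mixed["text"], combined["text"])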
'''simple docstring''' import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class lowerCamelCase_ : '''simple docstring''' def __init__( self : str , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any]=13 , __lowerCamelCase : Optional[Any]=7 , __lowerCamelCase : str=False , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : str=False , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : str=19 , __lowerCamelCase : List[str]=32 , __lowerCamelCase : int=5 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Dict=37 , __lowerCamelCase : Union[str, Any]="gelu" , __lowerCamelCase : str=0.1 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : str=5_12 , __lowerCamelCase : Optional[int]=16 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Optional[int]=0.02 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Any=4 , __lowerCamelCase : Optional[int]=None , ) -> int: A : Optional[Any] = parent A : str = batch_size A : Optional[Any] = seq_length A : str = is_training A : int = use_input_mask A : List[str] = use_token_type_ids A : Any = use_labels A : str = vocab_size A : Optional[int] = hidden_size A : Optional[Any] = num_hidden_layers A : Optional[int] = num_attention_heads A : Tuple = intermediate_size A : Dict = hidden_act A : str = hidden_dropout_prob A : Optional[int] = attention_probs_dropout_prob A : Optional[Any] = max_position_embeddings A : Tuple = type_vocab_size A : Optional[int] = type_sequence_label_size A : List[str] = initializer_range A : Optional[Any] = num_labels A : Union[str, Any] = num_choices A : List[Any] = scope def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int: A : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : List[Any] = None if self.use_input_mask: A : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) A : List[Any] = None A : List[str] = None A : Union[str, Any] = None if self.use_labels: A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A : Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) A : List[Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]: A : Any = EsmConfig( vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=lowerCamelCase__ , esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False} , ) return config def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , 
__lowerCamelCase : List[str] , __lowerCamelCase : List[str] ) -> List[Any]: A : int = EsmForProteinFolding(config=lowerCamelCase__ ).float() model.to(lowerCamelCase__ ) model.eval() A : Optional[Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ ) A : List[str] = model(lowerCamelCase__ ) A : Any = model(lowerCamelCase__ ) self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) ) self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> str: A : List[str] = self.prepare_config_and_inputs() ( A ) : str = config_and_inputs A : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowerCamelCase_ ( __lowerCAmelCase ,__lowerCAmelCase ,unittest.TestCase ): '''simple docstring''' a__ = False a__ = (EsmForProteinFolding,) if is_torch_available() else () a__ = () a__ = {} if is_torch_available() else {} a__ = False def SCREAMING_SNAKE_CASE__ ( self : str ) -> Union[str, Any]: A : List[Any] = EsmFoldModelTester(self ) A : List[Any] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[str]: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Union[str, Any]: A : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) @unittest.skip("Does not support attention outputs" ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Any: pass @unittest.skip def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[Any]: pass @unittest.skip("Esm does not support embedding resizing" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any: pass @unittest.skip("Esm does not support embedding resizing" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Tuple: pass @unittest.skip("ESMFold does not support passing input embeds!" ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[Any]: pass @unittest.skip("ESMFold does not support head pruning." ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any: pass @unittest.skip("ESMFold does not support head pruning." ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: pass @unittest.skip("ESMFold does not support head pruning." ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Any: pass @unittest.skip("ESMFold does not support head pruning." ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: pass @unittest.skip("ESMFold does not support head pruning." ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple: pass @unittest.skip("ESMFold does not output hidden states in the normal way." ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> str: pass @unittest.skip("ESMfold does not output hidden states in the normal way." ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any: pass @unittest.skip("ESMFold only has one output format." ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Union[str, Any]: pass @unittest.skip("This test doesn\'t work for ESMFold and doesn\'t test core functionality" ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any: pass @unittest.skip("ESMFold does not support input chunking." ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: pass @unittest.skip("ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments." 
) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict: pass @unittest.skip("ESMFold doesn\'t support torchscript compilation." ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Tuple: pass @unittest.skip("ESMFold doesn\'t support torchscript compilation." ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int: pass @unittest.skip("ESMFold doesn\'t support torchscript compilation." ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int: pass @unittest.skip("ESMFold doesn\'t support data parallel." ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict: pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]: pass @require_torch class lowerCamelCase_ ( __lowerCAmelCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]: A : Optional[Any] = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1" ).float() model.eval() A : str = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) A : Optional[Any] = model(lowerCamelCase__ )['''positions'''] A : Dict = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , lowerCamelCase__ , atol=1e-4 ) )
717
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of the last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of the last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
17
0
import heapq


def greedy_min_vertex_cover(graph: dict) -> set:
    queue: list[list] = []

    # for each node and its adjacency list, add them and the rank of the node to the queue
    # using the heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so -1 * len(v) is used to build it
    for key, value in graph.items():  # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
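A quick sanity check of the greedy cover, added here as an illustration; the star graph below is my own example, not part of the original module. On a star, the hub has maximum degree, so the heuristic picks it first and every edge is covered in a single step.

# Hypothetical usage sketch; assumes greedy_min_vertex_cover as defined above.
star_graph = {0: [1, 2, 3], 1: [0], 2: [0], 3: [0]}
assert greedy_min_vertex_cover(star_graph) == {0}  # the hub alone covers all edges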
718
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase_ : '''simple docstring''' def __init__( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Optional[int]=10 , __lowerCamelCase : Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase : Tuple=[1, 1, 2, 1] , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , ) -> str: A : Optional[Any] = parent A : Optional[int] = batch_size A : List[str] = image_size A : List[str] = num_channels A : Tuple = embeddings_size A : Optional[int] = hidden_sizes A : Dict = depths A : Optional[int] = is_training A : List[str] = use_labels A : List[Any] = hidden_act A : Optional[int] = num_labels A : int = scope A : List[Any] = len(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[Any]: A : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Optional[Any] = None if self.use_labels: A : Any = ids_tensor([self.batch_size] , self.num_labels ) A : List[Any] = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Tuple: A : List[str] = TFRegNetModel(config=__lowerCamelCase ) A : str = model(__lowerCamelCase , training=__lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Tuple ) -> List[str]: A : List[Any] = self.num_labels A : int = TFRegNetForImageClassification(__lowerCamelCase ) A : str = model(__lowerCamelCase , labels=__lowerCamelCase , training=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: A : Any = self.prepare_config_and_inputs() A , A , A : str = config_and_inputs A : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () a__ = ( {"feature-extraction": TFRegNetModel, 
"image-classification": TFRegNetForImageClassification} if is_tf_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: A : Optional[Any] = TFRegNetModelTester(self ) A : int = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Tuple: return @unittest.skip(reason="RegNet does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) @slow def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: super().test_keras_fit() @unittest.skip(reason="RegNet does not support input and output embeddings" ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Union[str, Any]: pass def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple: A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Union[str, Any] = model_class(__lowerCamelCase ) A : int = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Union[str, Any] = [*signature.parameters.keys()] A : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple: A : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: def check_hidden_states_output(__lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ): A : int = model_class(__lowerCamelCase ) A : int = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) , training=__lowerCamelCase ) A : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A : Dict = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) A , A : int = self.model_tester.prepare_config_and_inputs_for_common() A : str = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: A : List[str] = layer_type A : List[Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: A , A : Tuple = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(__lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]={} ): A : Optional[int] = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ) A : int = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ).to_tuple() def recursive_check(__lowerCamelCase : List[str] , __lowerCamelCase : Any ): if isinstance(__lowerCamelCase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in 
zip(__lowerCamelCase , __lowerCamelCase ): recursive_check(__lowerCamelCase , __lowerCamelCase ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(__lowerCamelCase , __lowerCamelCase ) ) , msg=( "Tuple and dict output are not equal. Difference:" F""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ) , ) recursive_check(__lowerCamelCase , __lowerCamelCase ) for model_class in self.all_model_classes: A : Tuple = model_class(__lowerCamelCase ) A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} ) A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: A : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Union[str, Any] = TFRegNetModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]: A : List[Any] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) A : Optional[int] = self.default_image_processor A : List[Any] = prepare_img() A : str = image_processor(images=__lowerCamelCase , return_tensors="tf" ) # forward pass A : List[Any] = model(**__lowerCamelCase , training=__lowerCamelCase ) # verify the logits A : Dict = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : Optional[Any] = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 )
17
0
import unittest from transformers import DebertaVaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaVaForMaskedLM, DebertaVaForMultipleChoice, DebertaVaForQuestionAnswering, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaModel, ) from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase_ ( lowercase_ ): '''simple docstring''' def __init__( self : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any]=13 , __lowerCamelCase : List[Any]=7 , __lowerCamelCase : int=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : int=99 , __lowerCamelCase : int=32 , __lowerCamelCase : Tuple=5 , __lowerCamelCase : Any=4 , __lowerCamelCase : Union[str, Any]=37 , __lowerCamelCase : int="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Tuple=5_12 , __lowerCamelCase : Tuple=16 , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : List[str]=0.02 , __lowerCamelCase : int=False , __lowerCamelCase : Tuple=True , __lowerCamelCase : List[str]="None" , __lowerCamelCase : Any=3 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : Optional[Any]=None , ) -> List[str]: A : List[str] = parent A : Tuple = batch_size A : Any = seq_length A : Union[str, Any] = is_training A : Tuple = use_input_mask A : str = use_token_type_ids A : Union[str, Any] = use_labels A : Any = vocab_size A : List[str] = hidden_size A : List[str] = num_hidden_layers A : Optional[int] = num_attention_heads A : List[str] = intermediate_size A : Tuple = hidden_act A : Dict = hidden_dropout_prob A : Dict = attention_probs_dropout_prob A : Union[str, Any] = max_position_embeddings A : Optional[int] = type_vocab_size A : Optional[int] = type_sequence_label_size A : Tuple = initializer_range A : Any = num_labels A : List[Any] = num_choices A : Tuple = relative_attention A : Optional[Any] = position_biased_input A : Any = pos_att_type A : Optional[Any] = scope def SCREAMING_SNAKE_CASE__ ( self : int ) -> int: A : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : int = None if self.use_input_mask: A : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) A : List[str] = None if self.use_token_type_ids: A : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A : Any = None A : Tuple = None A : int = None if self.use_labels: A : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) A : str = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Tuple: return DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : List[Any] ) -> Any: self.parent.assertListEqual(list(result.loss.size() ) , [] ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] ) -> Dict: A : Any = DebertaVaModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Optional[Any] = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase )[0] A : int = model(__lowerCamelCase , token_type_ids=__lowerCamelCase )[0] A : Any = model(__lowerCamelCase )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] ) -> Union[str, Any]: A : int = DebertaVaForMaskedLM(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : str = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : str ) -> List[str]: A : str = self.num_labels A : int = DebertaVaForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : int = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : str ) -> Any: A : List[Any] = self.num_labels A : Optional[Any] = DebertaVaForTokenClassification(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : int = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] ) -> Tuple: A : Optional[Any] = DebertaVaForQuestionAnswering(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Optional[int] = model( __lowerCamelCase , 
attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] ) -> int: A : Optional[int] = DebertaVaForMultipleChoice(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A : Optional[Any] = model( __lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]: A : List[Any] = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) : List[str] = config_and_inputs A : Optional[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class lowerCamelCase_ ( lowercase_ ,lowercase_ ,unittest.TestCase ): '''simple docstring''' a__ = ( ( DebertaVaModel, DebertaVaForMaskedLM, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaForQuestionAnswering, DebertaVaForMultipleChoice, ) if is_torch_available() else () ) a__ = ( { '''feature-extraction''': DebertaVaModel, '''fill-mask''': DebertaVaForMaskedLM, '''question-answering''': DebertaVaForQuestionAnswering, '''text-classification''': DebertaVaForSequenceClassification, '''token-classification''': DebertaVaForTokenClassification, '''zero-shot''': DebertaVaForSequenceClassification, } if is_torch_available() else {} ) a__ = True a__ = False a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]: A : List[Any] = DebertaVaModelTester(self ) A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int: A : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> str: A : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: A : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]: A : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]: A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_deberta_for_token_classification(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]: A : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_multiple_choice(*__lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : int ) -> int: for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Dict = DebertaVaModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @require_torch @require_sentencepiece @require_tokenizers class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @unittest.skip(reason="Model not available yet" ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: pass @slow def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[Any]: A : Union[str, Any] = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge" ) A : List[Any] = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] ) A : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): A : Union[str, Any] = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0] # compare the actual values for a slice. A : Dict = torch.tensor( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1e-4 ) , F"""{output[:, 1:4, 1:4]}""" )
719
import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = (PNDMScheduler,) a__ = (("num_inference_steps", 50),) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , **__lowerCamelCase : str ) -> Optional[Any]: A : Union[str, Any] = { "num_train_timesteps": 10_00, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**__lowerCamelCase ) return config def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : List[str]=0 , **__lowerCamelCase : Any ) -> Tuple: A : Dict = dict(self.forward_default_kwargs ) A : Dict = kwargs.pop("num_inference_steps" , __lowerCamelCase ) A : Union[str, Any] = self.dummy_sample A : List[Any] = 0.1 * sample A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A : Any = self.get_scheduler_config(**__lowerCamelCase ) A : int = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals A : Tuple = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCamelCase ) A : Dict = scheduler_class.from_pretrained(__lowerCamelCase ) new_scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals A : Tuple = dummy_past_residuals[:] A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : str = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A : int = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[str] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]: pass def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Optional[Any]=0 , **__lowerCamelCase : Tuple ) -> str: A : List[str] = dict(self.forward_default_kwargs ) A : Optional[int] = kwargs.pop("num_inference_steps" , __lowerCamelCase ) A : List[str] = self.dummy_sample A : Any = 0.1 * sample A : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A : Tuple = self.get_scheduler_config() A : Optional[int] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals (must be after setting timesteps) A : Optional[int] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCamelCase ) A : str = scheduler_class.from_pretrained(__lowerCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residual (must be after setting timesteps) A : Optional[Any] = dummy_past_residuals[:] A : Union[str, Any] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : Dict = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A : 
Union[str, Any] = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[Any] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : Tuple , **__lowerCamelCase : Any ) -> Union[str, Any]: A : Optional[Any] = self.scheduler_classes[0] A : List[Any] = self.get_scheduler_config(**__lowerCamelCase ) A : str = scheduler_class(**__lowerCamelCase ) A : List[str] = 10 A : Union[str, Any] = self.dummy_model() A : int = self.dummy_sample_deter scheduler.set_timesteps(__lowerCamelCase ) for i, t in enumerate(scheduler.prk_timesteps ): A : Optional[int] = model(__lowerCamelCase , __lowerCamelCase ) A : Optional[int] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): A : Tuple = model(__lowerCamelCase , __lowerCamelCase ) A : Tuple = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample return sample def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any: A : Union[str, Any] = dict(self.forward_default_kwargs ) A : Union[str, Any] = kwargs.pop("num_inference_steps" , __lowerCamelCase ) for scheduler_class in self.scheduler_classes: A : Dict = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) A : List[Any] = self.dummy_sample A : List[Any] = 0.1 * sample if num_inference_steps is not None and hasattr(__lowerCamelCase , "set_timesteps" ): scheduler.set_timesteps(__lowerCamelCase ) elif num_inference_steps is not None and not hasattr(__lowerCamelCase , "set_timesteps" ): A : List[str] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] A : Tuple = dummy_past_residuals[:] A : Dict = scheduler.step_prk(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[Any] = scheduler.step_prk(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) A : Any = scheduler.step_plms(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : str = scheduler.step_plms(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple: for timesteps in [1_00, 10_00]: self.check_over_configs(num_train_timesteps=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]: for steps_offset in [0, 1]: self.check_over_configs(steps_offset=__lowerCamelCase ) A : Dict = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config(steps_offset=1 ) A : Optional[int] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=__lowerCamelCase , beta_end=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : 
Optional[int] ) -> Union[str, Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]: for t in [1, 5, 10]: self.check_over_forward(time_step=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int: for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ): self.check_over_forward(num_inference_steps=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any: # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 A : str = 27 for scheduler_class in self.scheduler_classes: A : Tuple = self.dummy_sample A : List[Any] = 0.1 * sample A : List[Any] = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : str ) -> int: with self.assertRaises(__lowerCamelCase ): A : Union[str, Any] = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict: A : Optional[Any] = self.full_loop() A : Tuple = torch.sum(torch.abs(__lowerCamelCase ) ) A : Optional[int] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 198.1318 ) < 1e-2 assert abs(result_mean.item() - 0.2580 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any: A : Any = self.full_loop(prediction_type="v_prediction" ) A : Union[str, Any] = torch.sum(torch.abs(__lowerCamelCase ) ) A : List[str] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 67.3986 ) < 1e-2 assert abs(result_mean.item() - 0.0878 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]: # We specify different beta, so that the first alpha is 0.99 A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 ) A : Dict = torch.sum(torch.abs(__lowerCamelCase ) ) A : Any = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 230.0399 ) < 1e-2 assert abs(result_mean.item() - 0.2995 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any: # We specify different beta, so that the first alpha is 0.99 A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 ) A : List[Any] = torch.sum(torch.abs(__lowerCamelCase ) ) A : Optional[Any] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 186.9482 ) < 1e-2 assert abs(result_mean.item() - 0.2434 ) < 1e-3
17
0
from PIL import Image


def change_contrast(img: Image.Image, level: int) -> Image.Image:
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
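For readers without the lena.jpg fixture, here is a hedged usage sketch on a synthetic image; the gradient, the level value 50, and the probed pixel coordinates are all illustrative choices of mine. It shows the effect of the factor formula: values below 128 are pushed down and values above 128 are pushed up.

# Minimal sketch on a generated 256x256 grayscale gradient
# (pixel value equals the row index), so no image file is required.
from PIL import Image

gradient = Image.linear_gradient("L")
boosted = change_contrast(gradient, 50)
print(gradient.getpixel((0, 64)), "->", boosted.getpixel((0, 64)))    # 64 -> about 33
print(gradient.getpixel((0, 192)), "->", boosted.getpixel((0, 192)))  # 192 -> about 222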
720
from __future__ import annotations

from math import pi

# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
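As a worked example (my own numbers, not from the module): for two 1 m^2 plates separated by one micrometre, the force branch evaluates hbar*c*pi^2*A / (240*d^4), roughly 3.12e-25 / 2.4e-22, i.e. about 1.3e-3 N.

# Hedged usage sketch; assumes casimir_force as defined above.
result = casimir_force(force=0, area=1.0, distance=1e-6)
print(result)  # approximately {'force': 0.0013} (newtons)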
17
0
import argparse
import json
import os
import time
import zipfile

from get_ci_error_statistics import download_artifact, get_artifacts_links

from transformers import logging


logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    # optional parameters
    parser.add_argument(
        "--targets",
        default="DeprecationWarning,UserWarning,FutureWarning",
        type=list_str,
        help="Comma-separated list of target warning(s) which we want to extract.",
    )
    parser.add_argument(
        "--from_gh",
        action="store_true",
        help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
    )

    args = parser.parse_args()

    from_gh = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("=" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
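To make the filtering rule concrete, here is an illustrative check of the f": {x}: " target test against a fabricated pytest warnings-summary line; the path and message are invented for the example.

# Illustration of the target filter used in parse_line above.
targets = ["DeprecationWarning", "UserWarning"]
warning = "src/example.py:12: DeprecationWarning: this helper is deprecated"
print(any(f": {x}: " in warning for x in targets))  # True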
721
import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path to the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
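A hypothetical programmatic invocation of the converter; the output directory name is made up, and the repo id comes from the script's own help text. This is a sketch only: it needs network access and the transformers/huggingface_hub dependencies imported above.

# Hedged usage sketch; assumes convert_roberta_prelayernorm_checkpoint_to_pytorch as defined above.
convert_roberta_prelayernorm_checkpoint_to_pytorch(
    "andreasmadsen/efficient_mlm_m0.40", "./roberta-prelayernorm-converted"
)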
17
0
from math import factorial


def solution(num: int = 100) -> int:
    """Returns the sum of the digits in the number factorial(num)."""
    return sum(map(int, str(factorial(num))))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
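One hand-checkable case, added for illustration: 10! = 3628800, whose digits sum to 27.

# Assumes solution as defined above.
assert solution(10) == 27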
700
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
17
0
from typing import TYPE_CHECKING

from ..utils import _LazyModule


_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
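The _LazyModule indirection above defers the heavy submodule imports until an attribute is first accessed. As a rough illustration of the same idea, here is a simplified sketch using PEP 562 module-level __getattr__; the attribute map is a toy subset of mine, and this is not how transformers' _LazyModule is actually implemented.

# Simplified lazy-import sketch for a package __init__; illustration only.
import importlib

_LAZY_ATTRS = {"OnnxConfig": ".config", "export": ".convert"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")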
701
import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Optional[int]: self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) ) for a, b in zip(__lowerCamelCase , __lowerCamelCase ): self.assertAlmostEqual(__lowerCamelCase , __lowerCamelCase , delta=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]: A : List[Any] = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(__lowerCamelCase ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: A : Union[str, Any] = None ops.enable_eager_execution_internal() A : Tuple = tf.config.list_physical_devices("CPU" ) if len(__lowerCamelCase ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) A : Dict = tf.config.list_logical_devices(device_type="CPU" ) A : List[str] = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): A : Optional[int] = GradientAccumulator() A : Tuple = tf.Variable([4.0, 3.0] ) A , A : List[Any] = create_optimizer(5e-5 , 10 , 5 ) A : List[str] = tf.Variable([0.0, 0.0] , trainable=__lowerCamelCase ) def accumulate_on_replica(__lowerCamelCase : Tuple ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(__lowerCamelCase : Any , __lowerCamelCase : Optional[int] ): with strategy.scope(): A : int = strategy.experimental_local_results(__lowerCamelCase ) local_variables[0].assign(__lowerCamelCase ) local_variables[1].assign(__lowerCamelCase ) strategy.run(__lowerCamelCase , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(__lowerCamelCase ) def _check_local_values(__lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ): A : Optional[int] = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , __lowerCamelCase , tol=1e-2 ) self.assertListAlmostEqual(values[1].value() , __lowerCamelCase , tol=1e-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
17
0
import datasets from .evaluate import evaluate __SCREAMING_SNAKE_CASE = """\ @inproceedings{Rajpurkar2016SQuAD10, title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text}, author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang}, booktitle={EMNLP}, year={2016} } """ __SCREAMING_SNAKE_CASE = """ This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD). Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable. """ __SCREAMING_SNAKE_CASE = """ Computes SQuAD scores (F1 and EM). Args: predictions: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair as given in the references (see below) - 'prediction_text': the text of the answer references: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair (see above), - 'answers': a Dict in the SQuAD dataset format { 'text': list of possible texts for the answer, as a list of strings 'answer_start': list of start positions for the answer, as a list of ints } Note that answer_start values are not taken into account to compute the metric. Returns: 'exact_match': Exact match (the normalized answer exactly match the gold answer) 'f1': The F-score of predicted tokens versus the gold answer Examples: >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}] >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}] >>> squad_metric = datasets.load_metric(\"squad\") >>> results = squad_metric.compute(predictions=predictions, references=references) >>> print(results) {'exact_match': 100.0, 'f1': 100.0} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )}, "references": { "id": datasets.Value("string" ), "answers": datasets.features.Sequence( { "text": datasets.Value("string" ), "answer_start": datasets.Value("int32" ), } ), }, } ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] ) -> Dict: A : Dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions} A : str = [ { "paragraphs": [ { "qas": [ { "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]], "id": ref["id"], } for ref in references ] } ] } ] A : List[Any] = evaluate(dataset=__lowerCamelCase , predictions=__lowerCamelCase ) return score
702
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
17
0
import os
from tempfile import TemporaryDirectory
from unittest import TestCase

import pytest
from absl.testing import parameterized

from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path


DATASETS_ON_HF_GCP = [
    {"dataset": "wikipedia", "config_name": "20220301.de"},
    {"dataset": "wikipedia", "config_name": "20220301.en"},
    {"dataset": "wikipedia", "config_name": "20220301.fr"},
    {"dataset": "wikipedia", "config_name": "20220301.frr"},
    {"dataset": "wikipedia", "config_name": "20220301.it"},
    {"dataset": "wikipedia", "config_name": "20220301.simple"},
    {"dataset": "snli", "config_name": "plain_text"},
    {"dataset": "eli5", "config_name": "LFQA_reddit"},
    {"dataset": "wiki40b", "config_name": "en"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
    {"dataset": "natural_questions", "config_name": "default"},
]


def list_datasets_on_hf_gcp_parameters(with_config=True):
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]


@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))


@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds


@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
703
import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging __SCREAMING_SNAKE_CASE = ( """https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py""" ) __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name def UpperCAmelCase ( ): A : Union[str, Any] = "https://pypi.org/pypi/diffusers/json" A : List[Any] = json.loads(request.urlopen(_lowerCamelCase ).read() )["releases"].keys() return sorted(_lowerCamelCase , key=lambda _lowerCamelCase : version.Version(_lowerCamelCase ) ) def UpperCAmelCase ( ): # This function has already been executed if HF_MODULES_CACHE already is in the Python path. if HF_MODULES_CACHE in sys.path: return sys.path.append(_lowerCamelCase ) os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) A : List[Any] = Path(_lowerCamelCase ) / "__init__.py" if not init_path.exists(): init_path.touch() def UpperCAmelCase ( _lowerCamelCase ): init_hf_modules() A : Tuple = Path(_lowerCamelCase ) / name # If the parent module does not exist yet, recursively create it. if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) A : Optional[int] = dynamic_module_path / "__init__.py" if not init_path.exists(): init_path.touch() def UpperCAmelCase ( _lowerCamelCase ): with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f: A : Union[str, Any] = f.read() # Imports of the form `import .xxx` A : Union[str, Any] = re.findall("^\s*import\s+\.(\S+)\s*$" , _lowerCamelCase , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import" , _lowerCamelCase , flags=re.MULTILINE ) # Unique-ify return list(set(_lowerCamelCase ) ) def UpperCAmelCase ( _lowerCamelCase ): A : Optional[int] = False A : Tuple = [module_file] A : Optional[int] = [] # Let's recurse through all relative imports while not no_change: A : Optional[Any] = [] for f in files_to_check: new_imports.extend(get_relative_imports(_lowerCamelCase ) ) A : Optional[Any] = Path(_lowerCamelCase ).parent A : List[str] = [str(module_path / m ) for m in new_imports] A : Optional[Any] = [f for f in new_import_files if f not in all_relative_imports] A : Union[str, Any] = [f"""{f}.py""" for f in new_import_files] A : Tuple = len(_lowerCamelCase ) == 0 all_relative_imports.extend(_lowerCamelCase ) return all_relative_imports def UpperCAmelCase ( _lowerCamelCase ): with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f: A : Dict = f.read() # Imports of the form `import xxx` A : List[str] = re.findall("^\s*import\s+(\S+)\s*$" , _lowerCamelCase , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall("^\s*from\s+(\S+)\s+import" , _lowerCamelCase , flags=re.MULTILINE ) # Only keep the top-level module A : Optional[int] = [imp.split("." )[0] for imp in imports if not imp.startswith("." 
)] # Unique-ify and test we got them all A : Any = list(set(_lowerCamelCase ) ) A : Tuple = [] for imp in imports: try: importlib.import_module(_lowerCamelCase ) except ImportError: missing_packages.append(_lowerCamelCase ) if len(_lowerCamelCase ) > 0: raise ImportError( "This modeling file requires the following packages that were not found in your environment: " f"""{", ".join(_lowerCamelCase )}. Run `pip install {" ".join(_lowerCamelCase )}`""" ) return get_relative_imports(_lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : int = module_path.replace(os.path.sep , "." ) A : Optional[Any] = importlib.import_module(_lowerCamelCase ) if class_name is None: return find_pipeline_class(_lowerCamelCase ) return getattr(_lowerCamelCase , _lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase ): from ..pipelines import DiffusionPipeline A : int = dict(inspect.getmembers(_lowerCamelCase , inspect.isclass ) ) A : Union[str, Any] = None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , _lowerCamelCase ) and cls.__module__.split("." )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:""" f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in""" f""" {loaded_module}.""" ) A : Any = cls return pipeline_class def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , ): A : List[Any] = str(_lowerCamelCase ) A : Any = os.path.join(_lowerCamelCase , _lowerCamelCase ) if os.path.isfile(_lowerCamelCase ): A : Union[str, Any] = module_file_or_url A : Any = "local" elif pretrained_model_name_or_path.count("/" ) == 0: A : Optional[Any] = get_diffusers_versions() # cut ".dev0" A : Union[str, Any] = "v" + ".".join(__version__.split("." )[:3] ) # retrieve github version that matches if revision is None: A : List[Any] = latest_version if latest_version[1:] in available_versions else "main" logger.info(f"""Defaulting to latest_version: {revision}.""" ) elif revision in available_versions: A : Optional[Any] = f"""v{revision}""" elif revision == "main": A : Dict = revision else: raise ValueError( f"""`custom_revision`: {revision} does not exist. 
Please make sure to choose one of""" f""" {", ".join(available_versions + ["main"] )}.""" ) # community pipeline on GitHub A : Dict = COMMUNITY_PIPELINES_URL.format(revision=_lowerCamelCase , pipeline=_lowerCamelCase ) try: A : Optional[int] = cached_download( _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , ) A : Optional[Any] = "git" A : Any = pretrained_model_name_or_path + ".py" except EnvironmentError: logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" ) raise else: try: # Load from URL or cache if already cached A : Any = hf_hub_download( _lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , ) A : Optional[Any] = os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) ) except EnvironmentError: logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" ) raise # Check we have all the requirements in our environment A : List[str] = check_imports(_lowerCamelCase ) # Now we move the module inside our cached dynamic modules. A : List[str] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(_lowerCamelCase ) A : Optional[int] = Path(_lowerCamelCase ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(_lowerCamelCase , submodule_path / module_file ) for module_needed in modules_needed: A : int = f"""{module_needed}.py""" shutil.copy(os.path.join(_lowerCamelCase , _lowerCamelCase ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(_lowerCamelCase , _lowerCamelCase ): A : Optional[Any] = use_auth_token elif use_auth_token is True: A : Dict = HfFolder.get_token() else: A : Tuple = None A : List[str] = model_info(_lowerCamelCase , revision=_lowerCamelCase , token=_lowerCamelCase ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. 
A : str = submodule_path / commit_hash A : List[str] = full_submodule + os.path.sep + commit_hash create_dynamic_module(_lowerCamelCase ) if not (submodule_path / module_file).exists(): shutil.copy(_lowerCamelCase , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( _lowerCamelCase , f"""{module_needed}.py""" , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , ) return os.path.join(_lowerCamelCase , _lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , **_lowerCamelCase , ): A : int = get_cached_module_file( _lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , ) return get_class_in_module(_lowerCamelCase , final_module.replace(".py" , "" ) )
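# Taken together, the helpers above implement the dynamic-module mechanism:
# get_cached_module_file resolves a pipeline file (local path, GitHub community
# pipeline, or Hub repo), copies it and its relative imports into the dynamic
# module cache, and the final helper imports the requested class from it. A
# minimal sketch of the entry point, using the upstream diffusers names (the
# defs above appear under obfuscated names); the pipeline name is illustrative:
from diffusers.utils.dynamic_modules_utils import get_class_from_dynamic_module

pipeline_cls = get_class_from_dynamic_module(
    "clip_guided_stable_diffusion",                 # community pipeline to fetch
    module_file="clip_guided_stable_diffusion.py",  # file in examples/community
)
print(pipeline_cls.__name__)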
17
0
import math


def decimal_to_octal(num: int) -> str:
    """Convert a non-negative decimal integer to its octal string form."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        # Place each octal digit at the matching base-10 position.
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
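# Quick sanity check (a sketch, not part of the original script): the output
# format matches Python's built-in oct(), so the two can be compared directly.
# Values are kept small because the digit-placement trick above goes through
# floating-point math.pow.
for value in (2, 8, 65, 216, 512, 4095):
    assert decimal_to_octal(value) == oct(value), value
print("decimal_to_octal agrees with oct() on the sample values")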
704
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed __SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""): from run_translation import main # noqa set_seed(42) __SCREAMING_SNAKE_CASE = """sshleifer/student_marian_en_ro_6_1""" __SCREAMING_SNAKE_CASE = """sshleifer/tiny-mbart""" @require_torch class lowerCamelCase_ ( _A ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : str=None , __lowerCamelCase : List[str]=True , __lowerCamelCase : str=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Optional[int]=True , ) -> Dict: A : str = self.run_trainer( eval_steps=1 , max_len=12 , model_name=__lowerCamelCase , num_train_epochs=1 , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , predict_with_generate=__lowerCamelCase , do_train=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , ) A : Dict = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history if not do_eval: return A : List[Any] = [log for log in logs if "eval_loss" in log.keys()] A : Any = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats A : List[str] = eval_metrics[-1] assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase ) assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: self.run_seqaseq_quick() @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: self.run_seqaseq_quick(distributed=__lowerCamelCase ) @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str: self.run_seqaseq_quick(distributed=__lowerCamelCase ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]: self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple" ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> str: self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple --fp16" ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=__lowerCamelCase ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: self.run_seqaseq_quick( distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , 
predict_with_generate=__lowerCamelCase ) @require_apex @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict: # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" ) @parameterized.expand(["base", "low", "high", "mixed"] ) @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : List[str] ) -> Tuple: # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout A : Dict = { # test with the default log_level - should be info and thus log info once "base": {"extra_args_str": "", "n_matches": 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1}, # test with high log_level and log_level_replica - should be quiet on all processes "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0}, } A : List[str] = experiments[experiment_id] A : Union[str, Any] = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False} A : Union[str, Any] = "Running training" with CaptureStderr() as cl: self.run_seqaseq_quick(**__lowerCamelCase , extra_args_str=data["extra_args_str"] ) A : Dict = len(re.findall(__lowerCamelCase , cl.err ) ) self.assertEqual(__lowerCamelCase , data["n_matches"] ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: A : int = self.run_trainer( eval_steps=2 , max_len=1_28 , model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=10 , distributed=__lowerCamelCase , ) # Check metrics A : str = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history A : Dict = [log for log in logs if "eval_loss" in log.keys()] A : Dict = eval_metrics[0] A : int = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase ) # test if do_predict saves generations and metrics A : Optional[Any] = os.listdir(__lowerCamelCase ) A : Any = {os.path.basename(__lowerCamelCase ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]: from transformers.training_args import OptimizerNames def train_and_return_metrics(__lowerCamelCase : str ) -> Tuple[int, float]: A : Optional[int] = "--skip_memory_metrics 0" A : str = self.run_trainer( max_len=1_28 , 
model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=1 , optim=__lowerCamelCase , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , n_gpus_to_use=1 , ) # Check metrics A : Union[str, Any] = TrainerState.load_from_json(Path(__lowerCamelCase , "trainer_state.json" ) ).log_history A : str = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 ) A : List[Any] = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 ) A : int = logs[0]["train_loss"] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss A , A , A : int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) A , A , A : Optional[int] = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) A : Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb A : Dict = gpu_peak_mem_orig + gpu_alloc_mem_orig A : Dict = gpu_peak_mem_bnb + gpu_alloc_mem_bnb A : int = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings A : Tuple = 1_20 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( __lowerCamelCase , __lowerCamelCase , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got" F""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and""" F""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , ) self.assertGreater( __lowerCamelCase , __lowerCamelCase , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got" F""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and""" F""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , ) self.assertEqual( __lowerCamelCase , __lowerCamelCase , F"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" ) def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : float = 3e-3 , __lowerCamelCase : str = "adafactor" , __lowerCamelCase : bool = False , __lowerCamelCase : str = None , __lowerCamelCase : int = 0 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : int = None , ) -> List[str]: A : Optional[int] = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro" A : Optional[int] = self.get_auto_remove_tmp_dir() A : int = F""" --model_name_or_path {model_name} --train_file {data_dir}/train.json 
--validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(__lowerCamelCase )} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(__lowerCamelCase )} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX """.split() A : Optional[Any] = F""" --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(__lowerCamelCase )} """.split() A : Optional[Any] = "\n --do_predict\n ".split() A : Optional[int] = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F"""--optim {optim}""".split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: A : Dict = get_gpu_count() A : Any = get_torch_dist_unique_port() A : Optional[Any] = F""" -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py """.split() A : Any = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__lowerCamelCase , env=self.get_env() ) else: A : List[Any] = ["run_translation.py"] + args with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ): main() return output_dir
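# Sketch of the memory arithmetic from the comment in the bnb test above
# (illustrative numbers only): ~25M quantized parameters, AdamW keeps two
# fp32 optimizer states (8 bytes/param) while 8-bit bnb keeps two int8
# states (2 bytes/param), hence the ~150MB expected saving.
params_quantized = 25_000_000
adamw_state_bytes = 8  # 2 x fp32 states per parameter
bnb_state_bytes = 2    # 2 x int8 states per parameter
saving_mb = params_quantized * (adamw_state_bytes - bnb_state_bytes) / 10**6
print(f"expected optimizer-state saving ~= {saving_mb:.0f} MB")  # ~= 150 MB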
17
0
import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Optional[int]: self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) ) for a, b in zip(__lowerCamelCase , __lowerCamelCase ): self.assertAlmostEqual(__lowerCamelCase , __lowerCamelCase , delta=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]: A : List[Any] = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(__lowerCamelCase ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: A : Union[str, Any] = None ops.enable_eager_execution_internal() A : Tuple = tf.config.list_physical_devices("CPU" ) if len(__lowerCamelCase ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) A : Dict = tf.config.list_logical_devices(device_type="CPU" ) A : List[str] = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): A : Optional[int] = GradientAccumulator() A : Tuple = tf.Variable([4.0, 3.0] ) A : List[Any] = create_optimizer(5e-5 , 10 , 5 ) A : List[str] = tf.Variable([0.0, 0.0] , trainable=__lowerCamelCase ) def accumulate_on_replica(__lowerCamelCase : Tuple ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(__lowerCamelCase : Any , __lowerCamelCase : Optional[int] ): with strategy.scope(): A : int = strategy.experimental_local_results(__lowerCamelCase ) local_variables[0].assign(__lowerCamelCase ) local_variables[1].assign(__lowerCamelCase ) strategy.run(__lowerCamelCase , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(__lowerCamelCase ) def _check_local_values(__lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ): A : Optional[int] = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , __lowerCamelCase , tol=1e-2 ) self.assertListAlmostEqual(values[1].value() , __lowerCamelCase , tol=1e-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
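# The tests above drive GradientAccumulator directly; in a training loop it is
# typically used as in this minimal sketch (toy loss and values, not part of
# the original tests): accumulate for k steps, then apply and reset.
import tensorflow as tf
from transformers import GradientAccumulator, create_optimizer

accumulator = GradientAccumulator()
optimizer, _ = create_optimizer(init_lr=5e-5, num_train_steps=100, num_warmup_steps=10)
variable = tf.Variable([1.0, 2.0])
accumulation_steps = 4

for step in range(8):
    with tf.GradientTape() as tape:
        loss = tf.reduce_sum(variable**2)  # toy loss
    grads = tape.gradient(loss, [variable])
    accumulator(grads)  # accumulate instead of applying immediately
    if (step + 1) % accumulation_steps == 0:
        optimizer.apply_gradients(zip(accumulator.gradients, [variable]))
        accumulator.reset()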
705
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum sum over all non-empty (not necessarily contiguous)
    subsequences of `nums`: the sum of the positive elements, or the largest
    single element if all are negative."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # Keep the best total so far; extend it with `num` only if that helps.
        ans = max(ans, ans + num, num)
    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
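# Worked cases (a sketch): elements need not be contiguous, so the answer is
# the sum of the positive elements, or the largest element if all are negative.
print(max_subsequence_sum([1, -10, 5]))   # 6  (picks 1 and 5)
print(max_subsequence_sum([-3, -7, -2]))  # -2 (best single element)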
17
0
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Image from .base import TaskTemplate @dataclass(frozen=_A ) class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = field(default="image-classification" ,metadata={"include_in_asdict_even_if_is_default": True} ) a__ = Features({"image": Image()} ) a__ = Features({"labels": ClassLabel} ) a__ = "image" a__ = "labels" def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : int ) -> int: if self.label_column not in features: raise ValueError(F"""Column {self.label_column} is not present in features.""" ) if not isinstance(features[self.label_column] , __lowerCamelCase ): raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" ) A : Optional[int] = copy.deepcopy(self ) A : Union[str, Any] = self.label_schema.copy() A : List[Any] = features[self.label_column] A : List[Any] = label_schema return task_template @property def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict[str, str]: return { self.image_column: "image", self.label_column: "labels", }
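# A sketch of the template in action, using the upstream datasets naming
# (the class above appears under an obfuscated name; upstream it is
# datasets.tasks.ImageClassification):
from datasets import ClassLabel, Features, Image
from datasets.tasks import ImageClassification

features = Features({"img": Image(), "label": ClassLabel(names=["cat", "dog"])})
task = ImageClassification(image_column="img", label_column="label")
aligned = task.align_with_features(features)
print(aligned.column_mapping)  # {"img": "image", "label": "labels"}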
706
from math import sqrt


def solution(limit: int = 1000000) -> int:
    """Project Euler 86: return the least M such that the number of cuboids,
    with integer sides up to M x M x M and an integer-length shortest surface
    path between opposite corners, first exceeds `limit`."""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                # Count the (a, b) pairs with a + b == sum_shortest_sides and
                # 1 <= a <= b <= max_cuboid_size.
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
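# Cheap sanity check (a sketch): the Project Euler 86 statement gives the
# reference point that the cuboid count first exceeds 2000 at M = 100.
assert solution(2000) == 100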
17
0
def set_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` set to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` cleared to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` toggled."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is 1."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit value (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
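# Sample calls (a sketch of expected values):
print(set_bit(0b1101, 1))     # 15 (0b1111)
print(clear_bit(0b1101, 2))   # 9  (0b1001)
print(flip_bit(0b1101, 0))    # 12 (0b1100)
print(is_bit_set(0b1101, 3))  # True
print(get_bit(0b1101, 1))     # 0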
707
import os # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_doctest_list.py __SCREAMING_SNAKE_CASE = """.""" if __name__ == "__main__": __SCREAMING_SNAKE_CASE = os.path.join(REPO_PATH, """utils/documentation_tests.txt""") __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] with open(doctest_file_path) as fp: for line in fp: __SCREAMING_SNAKE_CASE = line.strip() __SCREAMING_SNAKE_CASE = os.path.join(REPO_PATH, line) if not (os.path.isfile(path) or os.path.isdir(path)): non_existent_paths.append(line) all_paths.append(path) if len(non_existent_paths) > 0: __SCREAMING_SNAKE_CASE = """\n""".join(non_existent_paths) raise ValueError(F"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""") if all_paths != sorted(all_paths): raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""")
17
0
import argparse import os from pathlib import Path import torch from bark.generation import _load_model as _bark_load_model from huggingface_hub import hf_hub_download from transformers import EncodecConfig, EncodecModel, set_seed from transformers.models.bark.configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) set_seed(770) __SCREAMING_SNAKE_CASE = { """c_attn""": """att_proj""", """c_proj""": """out_proj""", """c_fc""": """in_proj""", """transformer.""": """""", """h.""": """layers.""", """ln_1""": """layernorm_1""", """ln_2""": """layernorm_2""", """ln_f""": """layernorm_final""", """wpe""": """position_embeds_layer""", """wte""": """input_embeds_layer""", } __SCREAMING_SNAKE_CASE = { """text_small""": { """repo_id""": """suno/bark""", """file_name""": """text.pt""", }, """coarse_small""": { """repo_id""": """suno/bark""", """file_name""": """coarse.pt""", }, """fine_small""": { """repo_id""": """suno/bark""", """file_name""": """fine.pt""", }, """text""": { """repo_id""": """suno/bark""", """file_name""": """text_2.pt""", }, """coarse""": { """repo_id""": """suno/bark""", """file_name""": """coarse_2.pt""", }, """fine""": { """repo_id""": """suno/bark""", """file_name""": """fine_2.pt""", }, } __SCREAMING_SNAKE_CASE = os.path.dirname(os.path.abspath(__file__)) __SCREAMING_SNAKE_CASE = os.path.join(os.path.expanduser("""~"""), """.cache""") __SCREAMING_SNAKE_CASE = os.path.join(os.getenv("""XDG_CACHE_HOME""", default_cache_dir), """suno""", """bark_v0""") def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase=False ): A : List[str] = model_type if use_small: key += "_small" return os.path.join(_lowerCamelCase , REMOTE_MODEL_PATHS[key]["file_name"] ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) hf_hub_download(repo_id=_lowerCamelCase , filename=_lowerCamelCase , local_dir=_lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase="text" ): if model_type == "text": A : Any = BarkSemanticModel A : Dict = BarkSemanticConfig A : Tuple = BarkSemanticGenerationConfig elif model_type == "coarse": A : Optional[int] = BarkCoarseModel A : Union[str, Any] = BarkCoarseConfig A : Optional[Any] = BarkCoarseGenerationConfig elif model_type == "fine": A : List[str] = BarkFineModel A : Optional[Any] = BarkFineConfig A : Any = BarkFineGenerationConfig else: raise NotImplementedError() A : List[Any] = f"""{model_type}_small""" if use_small else model_type A : Optional[int] = REMOTE_MODEL_PATHS[model_key] if not os.path.exists(_lowerCamelCase ): logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" ) _download(model_info["repo_id"] , model_info["file_name"] ) A : Optional[Any] = torch.load(_lowerCamelCase , map_location=_lowerCamelCase ) # this is a hack A : Any = checkpoint["model_args"] if "input_vocab_size" not in model_args: A : Union[str, Any] = model_args["vocab_size"] A : Tuple = model_args["vocab_size"] del model_args["vocab_size"] # convert Bark model arguments to HF Bark model arguments A : 
Optional[Any] = model_args.pop("n_head" ) A : Optional[Any] = model_args.pop("n_embd" ) A : Any = model_args.pop("n_layer" ) A : int = ConfigClass(**checkpoint["model_args"] ) A : Optional[Any] = ModelClass(config=_lowerCamelCase ) A : int = GenerationConfigClass() A : Any = model_generation_config A : Optional[int] = checkpoint["model"] # fixup checkpoint A : Dict = "_orig_mod." for k, v in list(state_dict.items() ): if k.startswith(_lowerCamelCase ): # replace part of the key with corresponding layer name in HF implementation A : List[Any] = k[len(_lowerCamelCase ) :] for old_layer_name in new_layer_name_dict: A : List[str] = new_k.replace(_lowerCamelCase , new_layer_name_dict[old_layer_name] ) A : Optional[int] = state_dict.pop(_lowerCamelCase ) A : Tuple = set(state_dict.keys() ) - set(model.state_dict().keys() ) A : str = {k for k in extra_keys if not k.endswith(".attn.bias" )} A : str = set(model.state_dict().keys() ) - set(state_dict.keys() ) A : str = {k for k in missing_keys if not k.endswith(".attn.bias" )} if len(_lowerCamelCase ) != 0: raise ValueError(f"""extra keys found: {extra_keys}""" ) if len(_lowerCamelCase ) != 0: raise ValueError(f"""missing keys: {missing_keys}""" ) model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase ) A : List[str] = model.num_parameters(exclude_embeddings=_lowerCamelCase ) A : Optional[Any] = checkpoint["best_val_loss"].item() logger.info(f"""model loaded: {round(n_params/1e6 , 1 )}M params, {round(_lowerCamelCase , 3 )} loss""" ) model.eval() model.to(_lowerCamelCase ) del checkpoint, state_dict return model def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase="text" ): if model_type not in ("text", "coarse", "fine"): raise NotImplementedError() A : Dict = "cpu" # do conversion on cpu A : Tuple = _get_ckpt_path(_lowerCamelCase , use_small=_lowerCamelCase ) A : str = _load_model(_lowerCamelCase , _lowerCamelCase , model_type=_lowerCamelCase , use_small=_lowerCamelCase ) # load bark initial model A : Optional[Any] = _bark_load_model(_lowerCamelCase , "cpu" , model_type=_lowerCamelCase , use_small=_lowerCamelCase ) if model_type == "text": A : Tuple = bark_model["model"] if model.num_parameters(exclude_embeddings=_lowerCamelCase ) != bark_model.get_num_params(): raise ValueError("initial and new models don't have the same number of parameters" ) # check if same output as the bark model A : Union[str, Any] = 5 A : Tuple = 10 if model_type in ["text", "coarse"]: A : int = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int ) A : int = bark_model(_lowerCamelCase )[0] A : Dict = model(_lowerCamelCase ) # take last logits A : Dict = output_new_model_total.logits[:, [-1], :] else: A : str = 3 A : Optional[int] = 8 A : List[str] = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int ) A : Optional[Any] = model(_lowerCamelCase , _lowerCamelCase ) A : Optional[int] = bark_model(_lowerCamelCase , _lowerCamelCase ) A : List[Any] = output_new_model_total.logits # output difference should come from the difference of self-attention implementation design if output_new_model.shape != output_old_model.shape: raise ValueError("initial and new outputs don't have the same shape" ) if (output_new_model - output_old_model).abs().max().item() > 1e-3: raise ValueError("initial and new outputs are not equal" ) Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase 
, _lowerCamelCase , _lowerCamelCase , ): A : str = os.path.join(_lowerCamelCase , _lowerCamelCase ) A : List[Any] = BarkSemanticConfig.from_pretrained(os.path.join(_lowerCamelCase , "config.json" ) ) A : Dict = BarkCoarseConfig.from_pretrained(os.path.join(_lowerCamelCase , "config.json" ) ) A : List[Any] = BarkFineConfig.from_pretrained(os.path.join(_lowerCamelCase , "config.json" ) ) A : Dict = EncodecConfig.from_pretrained("facebook/encodec_24khz" ) A : int = BarkSemanticModel.from_pretrained(_lowerCamelCase ) A : Tuple = BarkCoarseModel.from_pretrained(_lowerCamelCase ) A : Optional[Any] = BarkFineModel.from_pretrained(_lowerCamelCase ) A : Optional[int] = EncodecModel.from_pretrained("facebook/encodec_24khz" ) A : int = BarkConfig.from_sub_model_configs( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) A : Tuple = BarkGenerationConfig.from_sub_model_configs( semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config ) A : Union[str, Any] = BarkModel(_lowerCamelCase ) A : Any = semantic A : Any = coarseAcoustic A : Any = fineAcoustic A : Tuple = codec A : Any = bark_generation_config Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) bark.save_pretrained(_lowerCamelCase , repo_id=_lowerCamelCase , push_to_hub=_lowerCamelCase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""") parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""") __SCREAMING_SNAKE_CASE = parser.parse_args() load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
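# Example invocation (a sketch; the output path is illustrative and the script
# filename is assumed from the upstream transformers repo, convert_suno_to_hf.py):
#
#   python convert_suno_to_hf.py text ./bark-text-small --is_small
#
# or, programmatically, via the entry point defined above:
load_model("./bark-text-small", model_type="text", use_small=True)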
708
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class lowerCamelCase_ : '''simple docstring''' def __init__( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Tuple=13 , __lowerCamelCase : List[str]=30 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : Any=3 , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : int=32 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Any=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=2 , ) -> str: A : List[Any] = parent A : Optional[int] = batch_size A : Any = image_size A : Optional[Any] = patch_size A : Optional[Any] = num_channels A : Tuple = is_training A : Optional[Any] = use_labels A : Union[str, Any] = hidden_size A : Tuple = num_hidden_layers A : Union[str, Any] = num_attention_heads A : Union[str, Any] = intermediate_size A : Any = hidden_act A : Tuple = hidden_dropout_prob A : Dict = attention_probs_dropout_prob A : Any = type_sequence_label_size A : Tuple = initializer_range A : List[Any] = scope A : Optional[int] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) A : List[str] = (image_size // patch_size) ** 2 A : List[str] = num_patches + 2 def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int: A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : List[Any] = None if self.use_labels: A : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Dict = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str] ) -> int: A : Optional[int] 
= DeiTModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : List[Any] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Any ) -> Any: A : List[Any] = DeiTForMaskedImageModeling(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A : List[str] = 1 A : Optional[int] = DeiTForMaskedImageModeling(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : int ) -> Dict: A : str = self.type_sequence_label_size A : List[str] = DeiTForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A : Any = 1 A : str = DeiTForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: A : Dict = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ) : Tuple = config_and_inputs A : int = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) a__ = ( { "feature-extraction": DeiTModel, "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]: A : str = DeiTModelTester(self ) A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict: self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int: pass def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: A , A : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Dict = model_class(__lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]: A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Union[str, Any] 
= model_class(__lowerCamelCase ) A : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Any = [*signature.parameters.keys()] A : Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any: A : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any]=False ) -> str: A : Union[str, Any] = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: if not self.model_tester.is_training: return A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() A : Dict = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(__lowerCamelCase ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue A : Union[str, Any] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Dict = model(**__lowerCamelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return A : Tuple = False A : Any = True for model_class in self.all_model_classes: if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue A : List[str] = model_class(__lowerCamelCase ) model.gradient_checkpointing_enable() model.to(__lowerCamelCase ) model.train() A : Dict = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Tuple = model(**__lowerCamelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() A : int = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(__lowerCamelCase ), *get_values(__lowerCamelCase ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ): A : Tuple = problem_type["title"] A : Optional[Any] = 
problem_type["num_labels"] A : List[str] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if problem_type["num_labels"] > 1: A : List[str] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) A : int = inputs["labels"].to(problem_type["dtype"] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=__lowerCamelCase ) as warning_list: A : Optional[Any] = model(**__lowerCamelCase ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Optional[Any] = DeiTModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: A : Optional[int] = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to( __lowerCamelCase ) A : List[Any] = self.default_image_processor A : List[Any] = prepare_img() A : Optional[Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): A : List[str] = model(**__lowerCamelCase ) # verify the logits A : str = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : List[str] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: A : str = DeiTModel.from_pretrained( "facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" ) A : Dict = self.default_image_processor A : Optional[int] = prepare_img() A : Union[str, Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ) A : Union[str, Any] = inputs.pixel_values.to(__lowerCamelCase ) # forward pass to make sure inference works in fp16 with torch.no_grad(): A : List[str] = model(__lowerCamelCase )
17
0
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = { """salesforce/blip2-opt-2.7b""": """https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json""", } class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = "blip_2_vision_model" def __init__( self : Tuple , __lowerCamelCase : Any=14_08 , __lowerCamelCase : Union[str, Any]=61_44 , __lowerCamelCase : Any=39 , __lowerCamelCase : Union[str, Any]=16 , __lowerCamelCase : Any=2_24 , __lowerCamelCase : Tuple=14 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : str=0.00001 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : Any=1e-10 , __lowerCamelCase : List[Any]=True , **__lowerCamelCase : Union[str, Any] , ) -> List[Any]: super().__init__(**__lowerCamelCase ) A : Dict = hidden_size A : Union[str, Any] = intermediate_size A : Optional[int] = num_hidden_layers A : Tuple = num_attention_heads A : List[str] = patch_size A : Any = image_size A : List[Any] = initializer_range A : Union[str, Any] = attention_dropout A : int = layer_norm_eps A : Dict = hidden_act A : Dict = qkv_bias @classmethod def SCREAMING_SNAKE_CASE__ ( cls : str , __lowerCamelCase : Union[str, os.PathLike] , **__lowerCamelCase : Optional[Any] ) -> "PretrainedConfig": cls._set_token_in_kwargs(__lowerCamelCase ) A : int = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase ) # get the vision config dict if we are loading from Blip2Config if config_dict.get("model_type" ) == "blip-2": A : Tuple = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__lowerCamelCase , **__lowerCamelCase ) class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = "blip_2_qformer" def __init__( self : Tuple , __lowerCamelCase : Union[str, Any]=3_05_22 , __lowerCamelCase : Union[str, Any]=7_68 , __lowerCamelCase : Tuple=12 , __lowerCamelCase : Dict=12 , __lowerCamelCase : Optional[int]=30_72 , __lowerCamelCase : int="gelu" , __lowerCamelCase : Any=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : str=5_12 , __lowerCamelCase : Any=0.02 , __lowerCamelCase : Any=1e-12 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : List[Any]="absolute" , __lowerCamelCase : Any=2 , __lowerCamelCase : str=14_08 , **__lowerCamelCase : int , ) -> Optional[Any]: super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase ) A : List[Any] = vocab_size A : Optional[int] = hidden_size A : Dict = num_hidden_layers A : int = num_attention_heads A : Optional[Any] = hidden_act A : List[str] = intermediate_size A : Optional[Any] = hidden_dropout_prob A : Optional[Any] = attention_probs_dropout_prob A : Tuple = max_position_embeddings A : Optional[int] = initializer_range A : Optional[Any] = layer_norm_eps A : Optional[int] = position_embedding_type A : List[str] = cross_attention_frequency A : Union[str, Any] = encoder_hidden_size @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , __lowerCamelCase : Union[str, os.PathLike] , **__lowerCamelCase : Optional[int] ) -> "PretrainedConfig": cls._set_token_in_kwargs(__lowerCamelCase ) A : Union[str, Any] = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase ) # get the qformer config dict if we are loading from Blip2Config if config_dict.get("model_type" ) == "blip-2": A : Optional[int] = config_dict["qformer_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__lowerCamelCase , **__lowerCamelCase ) class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = "blip-2" a__ = True def __init__( self : Union[str, Any] , __lowerCamelCase : Any=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Tuple=32 , **__lowerCamelCase : Any ) -> Optional[int]: super().__init__(**__lowerCamelCase ) if vision_config is None: A : List[str] = {} logger.info("vision_config is None. initializing the Blip2VisionConfig with default values." ) if qformer_config is None: A : Dict = {} logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values." ) if text_config is None: A : Any = {} logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." 
) A : List[Any] = BlipaVisionConfig(**__lowerCamelCase ) A : List[Any] = BlipaQFormerConfig(**__lowerCamelCase ) A : Union[str, Any] = text_config["model_type"] if "model_type" in text_config else "opt" A : Tuple = CONFIG_MAPPING[text_model_type](**__lowerCamelCase ) A : Tuple = self.text_config.tie_word_embeddings A : Union[str, Any] = self.text_config.is_encoder_decoder A : Union[str, Any] = num_query_tokens A : str = self.vision_config.hidden_size A : int = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES A : Any = 1.0 A : Optional[int] = 0.02 @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , __lowerCamelCase : BlipaVisionConfig , __lowerCamelCase : BlipaQFormerConfig , __lowerCamelCase : PretrainedConfig , **__lowerCamelCase : Tuple , ) -> Tuple: return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__lowerCamelCase , ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int: A : str = copy.deepcopy(self.__dict__ ) A : List[str] = self.vision_config.to_dict() A : int = self.qformer_config.to_dict() A : Dict = self.text_config.to_dict() A : Optional[int] = self.__class__.model_type return output
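# A sketch of composing a full config from the three sub-configs, mirroring
# the classmethod above (upstream transformers names assumed, since the
# classes here appear under obfuscated names):
from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig

config = Blip2Config.from_vision_qformer_text_configs(
    vision_config=Blip2VisionConfig(),
    qformer_config=Blip2QFormerConfig(),
    text_config=OPTConfig(),
    num_query_tokens=32,
)
print(config.num_query_tokens, config.qformer_config.encoder_hidden_size)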
709
from sklearn.metrics import recall_score import datasets __SCREAMING_SNAKE_CASE = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ __SCREAMING_SNAKE_CASE = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. 
>>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ __SCREAMING_SNAKE_CASE = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("int32" ) ), "references": datasets.Sequence(datasets.Value("int32" ) ), } if self.config_name == "multilabel" else { "predictions": datasets.Value("int32" ), "references": datasets.Value("int32" ), } ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , ) def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Tuple="binary" , __lowerCamelCase : Tuple=None , __lowerCamelCase : Tuple="warn" , ) -> Optional[Any]: A : str = recall_score( __lowerCamelCase , __lowerCamelCase , labels=__lowerCamelCase , pos_label=__lowerCamelCase , average=__lowerCamelCase , sample_weight=__lowerCamelCase , zero_division=__lowerCamelCase , ) return {"recall": float(__lowerCamelCase ) if score.size == 1 else score}
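A quick sanity check of the formula in the description above, Recall = TP / (TP + FN), against scikit-learn on the same toy labels as Example 1. (Two small obfuscation slips in the file above are worth noting: the compute method's return references score while the assignment target was renamed to A, and the BibTeX entry is missing its closing brace.)

# Sanity check: the documented formula agrees with sklearn on Example 1's data.
from sklearn.metrics import recall_score

refs = [0, 0, 1, 1, 1]
preds = [0, 1, 0, 1, 1]
tp = sum(r == 1 and p == 1 for r, p in zip(refs, preds))  # 2 true positives
fn = sum(r == 1 and p == 0 for r, p in zip(refs, preds))  # 1 false negative
assert tp / (tp + fn) == 2 / 3
assert abs(recall_score(refs, preds) - 2 / 3) < 1e-12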
17
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __SCREAMING_SNAKE_CASE = { """configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""], """tokenization_roberta""": ["""RobertaTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = ["""RobertaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """RobertaForCausalLM""", """RobertaForMaskedLM""", """RobertaForMultipleChoice""", """RobertaForQuestionAnswering""", """RobertaForSequenceClassification""", """RobertaForTokenClassification""", """RobertaModel""", """RobertaPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFRobertaForCausalLM""", """TFRobertaForMaskedLM""", """TFRobertaForMultipleChoice""", """TFRobertaForQuestionAnswering""", """TFRobertaForSequenceClassification""", """TFRobertaForTokenClassification""", """TFRobertaMainLayer""", """TFRobertaModel""", """TFRobertaPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """FlaxRobertaForCausalLM""", """FlaxRobertaForMaskedLM""", """FlaxRobertaForMultipleChoice""", """FlaxRobertaForQuestionAnswering""", """FlaxRobertaForSequenceClassification""", """FlaxRobertaForTokenClassification""", """FlaxRobertaModel""", """FlaxRobertaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE = 
_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
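Everything in this module is routed through _LazyModule, so the heavy torch/tf/flax imports declared above only happen when a symbol is first touched. A simplified sketch of that mechanism (an assumption; the real transformers._LazyModule also handles __dir__, pickling, and import-failure messages):

# Simplified sketch of the lazy-import mechanism (assumption, not the real class).
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol back to the submodule that defines it.
        self._symbol_to_submodule = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr):
        # Import the owning submodule only on first attribute access.
        submodule = self._symbol_to_submodule[attr]
        module = importlib.import_module("." + submodule, self.__name__)
        return getattr(module, attr)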
710
from collections import deque from .hash_table import HashTable class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : Union[str, Any] , *__lowerCamelCase : Dict , **__lowerCamelCase : int ) -> Optional[int]: super().__init__(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Union[str, Any]: A : Optional[Any] = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(__lowerCamelCase ) A : Dict = self.values[key] def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: return ( sum(self.charge_factor - len(__lowerCamelCase ) for slot in self.values ) / self.size_table * self.charge_factor ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : List[Any]=None ) -> Optional[int]: if not ( len(self.values[key] ) == self.charge_factor and self.values.count(__lowerCamelCase ) == 0 ): return key return super()._collision_resolution(__lowerCamelCase , __lowerCamelCase )
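The obfuscation above has dropped the assignment targets in the chaining setter (leaving key unbound in its body), and the same collapse affects len(slot) and count(None) further down, so the class does not run as written. Reconstructed from the standard separate-chaining version of this algorithm, the presumed intent is:

# Presumed intent of the methods above (a reconstruction, not a fix applied
# to the snippet): each slot holds a deque so colliding keys chain values.
from collections import deque

def set_value(table, key, data):
    table.values[key] = deque([]) if table.values[key] is None else table.values[key]
    table.values[key].appendleft(data)
    table._keys[key] = table.values[key]  # _keys comes from the parent HashTable

def balanced_factor(table):
    # The generator above presumably read len(slot), not len of an unbound name.
    return sum(table.charge_factor - len(slot) for slot in table.values) / table.size_table * table.charge_factor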
17
0
import torch from diffusers import DiffusionPipeline class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : int , __lowerCamelCase : str , __lowerCamelCase : List[str] ) -> Tuple: super().__init__() self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase ) def __call__( self : int ) -> List[Any]: A : Dict = torch.randn( (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , ) A : Optional[Any] = 1 A : Tuple = self.unet(__lowerCamelCase , __lowerCamelCase ).sample A : Any = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample A : List[str] = scheduler_output - scheduler_output + torch.ones_like(__lowerCamelCase ) return result
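Note that the pipeline above returns scheduler_output - scheduler_output + ones_like(...), so its output is always a tensor of ones; it exists only to exercise the unet and scheduler plumbing. A hypothetical driver, assuming the duplicated __lowerCamelCase parameters are given distinct names so the class (called OneStepPipeline here) actually parses:

# Hypothetical driver; OneStepPipeline stands for the class above with its
# duplicated parameter names restored. Output is all-ones by construction.
import torch
from diffusers import DDPMScheduler, UNet2DModel

unet = UNet2DModel(
    sample_size=8,
    in_channels=3,
    out_channels=3,
    layers_per_block=1,
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
pipe = OneStepPipeline(unet, DDPMScheduler())
result = pipe()
assert torch.equal(result, torch.ones_like(result))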
711
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class lowerCamelCase_ : '''simple docstring''' @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str: return self.get_dummy_input() @property def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : int=False , __lowerCamelCase : int=False , __lowerCamelCase : Optional[int]=False , ) -> Dict: A : Optional[Any] = 4 A : List[str] = 32 A : Any = (32, 32) A : str = torch.manual_seed(0 ) A : int = torch.device(__lowerCamelCase ) A : List[str] = (batch_size, num_channels) + sizes A : Dict = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase ) A : int = {"hidden_states": hidden_states} if include_temb: A : Any = 1_28 A : List[str] = randn_tensor((batch_size, temb_channels) , generator=__lowerCamelCase , device=__lowerCamelCase ) if include_res_hidden_states_tuple: A : str = torch.manual_seed(1 ) A : Tuple = (randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase ),) if include_encoder_hidden_states: A : Dict = floats_tensor((batch_size, 32, 32) ).to(__lowerCamelCase ) if include_skip_sample: A : Optional[int] = randn_tensor(((batch_size, 3) + sizes) , generator=__lowerCamelCase , device=__lowerCamelCase ) return dummy_input def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]: A : Dict = { "in_channels": 32, "out_channels": 32, "temb_channels": 1_28, } if self.block_type == "up": A : Dict = 32 if self.block_type == "mid": init_dict.pop("out_channels" ) A : str = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Optional[int] ) -> Union[str, Any]: A , A : str = self.prepare_init_args_and_inputs_for_common() A : List[Any] = self.block_class(**__lowerCamelCase ) unet_block.to(__lowerCamelCase ) unet_block.eval() with torch.no_grad(): A : int = unet_block(**__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): A : Union[str, Any] = output[0] self.assertEqual(output.shape , self.output_shape ) A : Any = output[0, -1, -3:, -3:] A : Union[str, Any] = torch.tensor(__lowerCamelCase ).to(__lowerCamelCase ) assert torch_all_close(output_slice.flatten() , __lowerCamelCase , atol=5e-3 ) @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: A , A : Tuple = self.prepare_init_args_and_inputs_for_common() A : str = self.block_class(**__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : Optional[int] = model(**__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): A : Optional[Any] = output[0] A : List[str] = torch.device(__lowerCamelCase ) A : List[str] = randn_tensor(output.shape , device=__lowerCamelCase ) A : Dict = torch.nn.functional.mse_loss(__lowerCamelCase , __lowerCamelCase ) loss.backward()
17
0
import os from pathlib import Path def UpperCAmelCase ( ): from torch.utils.cpp_extension import load A : Optional[Any] = Path(_lowerCamelCase ).resolve().parent.parent.parent / "kernels" / "deformable_detr" A : Optional[int] = [ root / filename for filename in [ "vision.cpp", os.path.join("cpu" , "ms_deform_attn_cpu.cpp" ), os.path.join("cuda" , "ms_deform_attn_cuda.cu" ), ] ] load( "MultiScaleDeformableAttention" , _lowerCamelCase , with_cuda=_lowerCamelCase , extra_include_paths=[str(_lowerCamelCase )] , extra_cflags=["-DWITH_CUDA=1"] , extra_cuda_cflags=[ "-DCUDA_HAS_FP16=1", "-D__CUDA_NO_HALF_OPERATORS__", "-D__CUDA_NO_HALF_CONVERSIONS__", "-D__CUDA_NO_HALF2_OPERATORS__", ] , ) import MultiScaleDeformableAttention as MSDA return MSDA
712
import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs (`Dict[str, Any]`, *optional*): Additional stopping criteria specific kwargs. Return: `bool`. `False` indicates we should continue, `True` indicates we should stop. """ class lowerCamelCase_ ( _A ): '''simple docstring''' @add_start_docstrings(__lowerCamelCase ) def __call__( self : Optional[int] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Optional[Any] ) -> bool: raise NotImplementedError("StoppingCriteria needs to be subclassed" ) class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] = None ) -> List[Any]: A : str = max_length A : Optional[int] = max_position_embeddings @add_start_docstrings(__lowerCamelCase ) def __call__( self : List[str] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Any ) -> bool: A : List[Any] = input_ids.shape[-1] A : Any = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( "This is a friendly reminder - the current text generation call will exceed the model's predefined " F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """ "exceptions, performance degradation, or nothing at all." ) return is_done class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : int ) -> List[Any]: warnings.warn( "The class `MaxNewTokensCriteria` is deprecated. " F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """ "with `max_length = start_length + max_new_tokens` instead." 
, __lowerCamelCase , ) A : str = start_length A : Optional[Any] = max_new_tokens A : Dict = start_length + max_new_tokens @add_start_docstrings(__lowerCamelCase ) def __call__( self : int , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool: return input_ids.shape[-1] >= self.max_length class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : Optional[int] , __lowerCamelCase : float , __lowerCamelCase : Optional[float] = None ) -> List[Any]: A : str = max_time A : Dict = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(__lowerCamelCase ) def __call__( self : Any , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool: return time.time() - self.initial_timestamp > self.max_time class lowerCamelCase_ ( _A ): '''simple docstring''' @add_start_docstrings(__lowerCamelCase ) def __call__( self : Union[str, Any] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : int ) -> bool: return any(criteria(__lowerCamelCase , __lowerCamelCase ) for criteria in self ) @property def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]: for stopping_criterium in self: if isinstance(__lowerCamelCase , __lowerCamelCase ): return stopping_criterium.max_length elif isinstance(__lowerCamelCase , __lowerCamelCase ): return stopping_criterium.max_length return None def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : Optional[int] = stopping_criteria.max_length A : Any = deepcopy(_lowerCamelCase ) if stopping_max_length is not None and stopping_max_length != max_length: warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , _lowerCamelCase ) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=_lowerCamelCase ) ) return new_stopping_criteria
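The trailing module-level helper above mirrors transformers' validate_stopping_criteria (the obfuscation has collapsed several of its local names, which is why stopping_max_length and new_stopping_criteria appear unbound). A short usage sketch of the public API these classes back, using only documented transformers exports:

# Usage sketch of the criteria above via the public transformers API.
import torch
from transformers import MaxLengthCriteria, StoppingCriteriaList

criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=5)])
input_ids = torch.ones((1, 5), dtype=torch.long)
scores = torch.zeros((1, 100))
assert criteria(input_ids, scores)              # length reached: stop
assert not criteria(input_ids[:, :3], scores)   # still short: keep generating
assert criteria.max_length == 5                 # surfaced by the property above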
17
0
__SCREAMING_SNAKE_CASE = { 0: """0""", 1: """1""", 2: """2""", 3: """3""", 4: """4""", 5: """5""", 6: """6""", 7: """7""", 8: """8""", 9: """9""", 10: """a""", 11: """b""", 12: """c""", 13: """d""", 14: """e""", 15: """f""", } def UpperCAmelCase ( _lowerCamelCase ): assert type(_lowerCamelCase ) in (int, float) and decimal == int(_lowerCamelCase ) A : Optional[int] = int(_lowerCamelCase ) A : Dict = "" A : Dict = False if decimal < 0: A : List[str] = True decimal *= -1 while decimal > 0: A : Optional[Any] = divmod(_lowerCamelCase , 16 ) A : Any = values[remainder] + hexadecimal A : Tuple = "0x" + hexadecimal if negative: A : Optional[Any] = "-" + hexadecimal return hexadecimal if __name__ == "__main__": import doctest doctest.testmod()
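The divmod line above has lost its tuple-unpacking target to the obfuscation; the intent is decimal, remainder = divmod(decimal, 16), without which remainder is unbound and decimal is never updated. With that restored, the converter matches Python's built-in hex() for every nonzero integer (0 is an edge case: the loop never runs and a bare "0x" comes back):

# De-obfuscated sketch of the converter above, with the divmod unpacking
# restored; checked against the built-in hex(). 0 is skipped (bare "0x" bug).
def decimal_to_hexadecimal(decimal: int) -> str:
    assert isinstance(decimal, (int, float)) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = decimal < 0
    decimal = abs(decimal)
    digits = "0123456789abcdef"
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = digits[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    return "-" + hexadecimal if negative else hexadecimal

for n in (1, 10, 255, -4096, 16**4):
    assert decimal_to_hexadecimal(n) == hex(n)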
713
from sympy import diff, lambdify, symbols from sympy.functions import * # noqa: F403 def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = "x" , _lowerCamelCase = 10**-10 , _lowerCamelCase = 1 , ): A : str = symbols(_lowerCamelCase ) A : int = lambdify(_lowerCamelCase , _lowerCamelCase ) A : List[str] = lambdify(_lowerCamelCase , diff(_lowerCamelCase , _lowerCamelCase ) ) A : Optional[int] = starting_point while True: if diff_function(_lowerCamelCase ) != 0: A : Optional[Any] = prev_guess - multiplicity * func(_lowerCamelCase ) / diff_function( _lowerCamelCase ) else: raise ZeroDivisionError("Could not find root" ) from None # Precision is checked by comparing the difference of consecutive guesses if abs(next_guess - prev_guess ) < precision: return next_guess A : int = next_guess # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""") # Find root of polynomial # Find fourth Root of 5 print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""") # Find value of e print( """The root of log(y) - 1 = 0 is """, F"""{newton_raphson('log(y) - 1', 2, variable='y')}""", ) # Exponential Roots print( """The root of exp(x) - 1 = 0 is""", F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""", ) # Find root of cos(x) print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
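The update rule implemented above is x_{n+1} = x_n - m * f(x_n) / f'(x_n), where m is the multiplicity. One hand-computed step for f(x) = x**2 - 4 with m = 1, starting from x0 = 3:

# One hand-computed Newton step: x1 = 3 - (9 - 4) / (2 * 3) = 13/6, roughly
# 2.1667, already heading toward the true root at 2.
x0 = 3.0
x1 = x0 - (x0**2 - 4) / (2 * x0)
assert abs(x1 - 13 / 6) < 1e-12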
17
0
import argparse import torch from transformers import ( UniSpeechSatConfig, UniSpeechSatForAudioFrameClassification, UniSpeechSatForSequenceClassification, UniSpeechSatForXVector, WavaVecaFeatureExtractor, logging, ) logging.set_verbosity_info() __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): A : Tuple = UniSpeechSatForSequenceClassification.from_pretrained(_lowerCamelCase , config=_lowerCamelCase ) A : Optional[int] = downstream_dict["projector.weight"] A : List[str] = downstream_dict["projector.bias"] A : List[str] = downstream_dict["model.post_net.linear.weight"] A : List[str] = downstream_dict["model.post_net.linear.bias"] return model def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): A : str = UniSpeechSatForAudioFrameClassification.from_pretrained(_lowerCamelCase , config=_lowerCamelCase ) A : Tuple = downstream_dict["model.linear.weight"] A : str = downstream_dict["model.linear.bias"] return model def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): A : int = UniSpeechSatForXVector.from_pretrained(_lowerCamelCase , config=_lowerCamelCase ) A : Optional[int] = downstream_dict["connector.weight"] A : str = downstream_dict["connector.bias"] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): A : Dict = downstream_dict[ f"""model.framelevel_feature_extractor.module.{i}.kernel.weight""" ] A : Union[str, Any] = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""] A : List[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"] A : str = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"] A : Optional[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"] A : Optional[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"] A : Optional[Any] = downstream_dict["objective.W"] return model @torch.no_grad() def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): A : Optional[Any] = torch.load(_lowerCamelCase , map_location="cpu" ) A : Dict = checkpoint["Downstream"] A : Dict = UniSpeechSatConfig.from_pretrained(_lowerCamelCase ) A : Any = WavaVecaFeatureExtractor.from_pretrained( _lowerCamelCase , return_attention_mask=_lowerCamelCase , do_normalize=_lowerCamelCase ) A : Dict = hf_config.architectures[0] if arch.endswith("ForSequenceClassification" ): A : str = convert_classification(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) elif arch.endswith("ForAudioFrameClassification" ): A : Optional[int] = convert_diarization(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) elif arch.endswith("ForXVector" ): A : Optional[Any] = convert_xvector(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) else: raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" ) if hf_config.use_weighted_layer_sum: A : Union[str, Any] = checkpoint["Featurizer"]["weights"] hf_feature_extractor.save_pretrained(_lowerCamelCase ) hf_model.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument( """--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model.""" ) parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl 
checkpoint.""") parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""") __SCREAMING_SNAKE_CASE = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
714
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} __SCREAMING_SNAKE_CASE = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } __SCREAMING_SNAKE_CASE = { """allenai/led-base-16384""": 16384, } class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = LEDTokenizer a__ = ["input_ids", "attention_mask"] def __init__( self : int , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : str="replace" , __lowerCamelCase : Optional[int]="<s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Dict="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : Union[str, Any]="<pad>" , __lowerCamelCase : Dict="<mask>" , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : str=True , **__lowerCamelCase : Union[str, Any] , ) -> Optional[int]: super().__init__( __lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase , **__lowerCamelCase , ) A : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space: A : Any = getattr(__lowerCamelCase , pre_tok_state.pop("type" ) ) A : Any = add_prefix_space A : Tuple = pre_tok_class(**__lowerCamelCase ) A : str = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` A : List[str] = "post_processor" A : Union[str, Any] = getattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) if tokenizer_component_instance: A : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A : Union[str, Any] = tuple(state["sep"] ) if "cls" in state: A : str = tuple(state["cls"] ) A : int = False if state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space: A : List[Any] = add_prefix_space A : Dict = True if state.get("trim_offsets" , __lowerCamelCase ) != trim_offsets: A : Dict = trim_offsets A : str = True if changes_to_apply: A : int = getattr(__lowerCamelCase , state.pop("type" ) ) A : Dict = component_class(**__lowerCamelCase ) setattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) @property # Copied from 
transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Any ) -> Dict: A : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else value A : Tuple = value def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[str] ) -> BatchEncoding: A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , *__lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ) -> BatchEncoding: A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]: A : Optional[Any] = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase ) return tuple(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any]=None ) -> List[str]: A : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: A : str = [self.sep_token_id] A : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ) -> dict: A : Dict = super()._pad( encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: A : List[Any] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: A : Optional[int] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
A : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase ) if needs_to_be_padded: A : Any = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` A : Tuple = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": A : Tuple = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
17
0
'''simple docstring''' import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCamelCase_ ( _A ,unittest.TestCase ): '''simple docstring''' a__ = AudioLDMPipeline a__ = TEXT_TO_AUDIO_PARAMS a__ = TEXT_TO_AUDIO_BATCH_PARAMS a__ = frozenset( [ "num_inference_steps", "num_waveforms_per_prompt", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[Any]: torch.manual_seed(0 ) A : List[str] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=(32, 64) , class_embed_type="simple_projection" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=__lowerCamelCase , ) A : List[str] = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , ) torch.manual_seed(0 ) A : Optional[int] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) A : List[str] = ClapTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , projection_dim=32 , ) A : Any = ClapTextModelWithProjection(__lowerCamelCase ) A : List[str] = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta" , model_max_length=77 ) A : List[str] = SpeechTaHifiGanConfig( model_in_dim=8 , sampling_rate=1_60_00 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=__lowerCamelCase , ) A : Optional[int] = SpeechTaHifiGan(__lowerCamelCase ) A : Union[str, Any] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "vocoder": vocoder, } return components def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any]=0 ) -> int: if str(__lowerCamelCase ).startswith("mps" ): A : Any = torch.manual_seed(__lowerCamelCase ) else: A : Union[str, Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) A : List[Any] = { "prompt": "A hammer hitting a wooden surface", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, } return inputs def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]: A : int = "cpu" # ensure determinism for the device-dependent torch.Generator A : Dict = self.get_dummy_components() A : List[str] = 
AudioLDMPipeline(**__lowerCamelCase ) A : Tuple = audioldm_pipe.to(__lowerCamelCase ) audioldm_pipe.set_progress_bar_config(disable=__lowerCamelCase ) A : Optional[int] = self.get_dummy_inputs(__lowerCamelCase ) A : int = audioldm_pipe(**__lowerCamelCase ) A : Optional[Any] = output.audios[0] assert audio.ndim == 1 assert len(__lowerCamelCase ) == 2_56 A : int = audio[:10] A : Dict = np.array( [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]: A : List[str] = self.get_dummy_components() A : List[Any] = AudioLDMPipeline(**__lowerCamelCase ) A : Tuple = audioldm_pipe.to(__lowerCamelCase ) A : Optional[Any] = audioldm_pipe.to(__lowerCamelCase ) audioldm_pipe.set_progress_bar_config(disable=__lowerCamelCase ) A : str = self.get_dummy_inputs(__lowerCamelCase ) A : Optional[Any] = 3 * [inputs["prompt"]] # forward A : List[str] = audioldm_pipe(**__lowerCamelCase ) A : Dict = output.audios[0] A : Tuple = self.get_dummy_inputs(__lowerCamelCase ) A : List[Any] = 3 * [inputs.pop("prompt" )] A : Optional[int] = audioldm_pipe.tokenizer( __lowerCamelCase , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__lowerCamelCase , return_tensors="pt" , ) A : str = text_inputs["input_ids"].to(__lowerCamelCase ) A : str = audioldm_pipe.text_encoder( __lowerCamelCase , ) A : Tuple = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state A : Optional[int] = F.normalize(__lowerCamelCase , dim=-1 ) A : Union[str, Any] = prompt_embeds # forward A : List[Any] = audioldm_pipe(**__lowerCamelCase ) A : str = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[str]: A : Optional[Any] = self.get_dummy_components() A : List[str] = AudioLDMPipeline(**__lowerCamelCase ) A : List[Any] = audioldm_pipe.to(__lowerCamelCase ) A : Optional[int] = audioldm_pipe.to(__lowerCamelCase ) audioldm_pipe.set_progress_bar_config(disable=__lowerCamelCase ) A : Optional[Any] = self.get_dummy_inputs(__lowerCamelCase ) A : int = 3 * ["this is a negative prompt"] A : Dict = negative_prompt A : str = 3 * [inputs["prompt"]] # forward A : Tuple = audioldm_pipe(**__lowerCamelCase ) A : str = output.audios[0] A : List[str] = self.get_dummy_inputs(__lowerCamelCase ) A : Dict = 3 * [inputs.pop("prompt" )] A : Optional[int] = [] for p in [prompt, negative_prompt]: A : List[Any] = audioldm_pipe.tokenizer( __lowerCamelCase , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__lowerCamelCase , return_tensors="pt" , ) A : int = text_inputs["input_ids"].to(__lowerCamelCase ) A : List[Any] = audioldm_pipe.text_encoder( __lowerCamelCase , ) A : List[Any] = text_embeds.text_embeds # additional L_2 normalization over each hidden-state A : Dict = F.normalize(__lowerCamelCase , dim=-1 ) embeds.append(__lowerCamelCase ) A : List[Any] = embeds # forward A : Union[str, Any] = audioldm_pipe(**__lowerCamelCase ) A : Optional[Any] = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str: A : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator A : List[Any] = self.get_dummy_components() A : Optional[int] = PNDMScheduler(skip_prk_steps=__lowerCamelCase ) A : Any = AudioLDMPipeline(**__lowerCamelCase ) A : List[str] = audioldm_pipe.to(__lowerCamelCase ) 
audioldm_pipe.set_progress_bar_config(disable=__lowerCamelCase ) A : int = self.get_dummy_inputs(__lowerCamelCase ) A : Any = "egg cracking" A : List[Any] = audioldm_pipe(**__lowerCamelCase , negative_prompt=__lowerCamelCase ) A : Any = output.audios[0] assert audio.ndim == 1 assert len(__lowerCamelCase ) == 2_56 A : Any = audio[:10] A : Tuple = np.array( [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict: A : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator A : Optional[Any] = self.get_dummy_components() A : List[Any] = PNDMScheduler(skip_prk_steps=__lowerCamelCase ) A : Tuple = AudioLDMPipeline(**__lowerCamelCase ) A : Optional[Any] = audioldm_pipe.to(__lowerCamelCase ) audioldm_pipe.set_progress_bar_config(disable=__lowerCamelCase ) A : Union[str, Any] = "A hammer hitting a wooden surface" # test num_waveforms_per_prompt=1 (default) A : Optional[int] = audioldm_pipe(__lowerCamelCase , num_inference_steps=2 ).audios assert audios.shape == (1, 2_56) # test num_waveforms_per_prompt=1 (default) for batch of prompts A : str = 2 A : List[str] = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios assert audios.shape == (batch_size, 2_56) # test num_waveforms_per_prompt for single prompt A : Union[str, Any] = 2 A : Optional[Any] = audioldm_pipe(__lowerCamelCase , num_inference_steps=2 , num_waveforms_per_prompt=__lowerCamelCase ).audios assert audios.shape == (num_waveforms_per_prompt, 2_56) # test num_waveforms_per_prompt for batch of prompts A : Optional[int] = 2 A : Tuple = audioldm_pipe( [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__lowerCamelCase ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_56) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]: A : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator A : int = self.get_dummy_components() A : List[Any] = AudioLDMPipeline(**__lowerCamelCase ) A : Optional[Any] = audioldm_pipe.to(__lowerCamelCase ) audioldm_pipe.set_progress_bar_config(disable=__lowerCamelCase ) A : Any = audioldm_pipe.vocoder.config.sampling_rate A : Tuple = self.get_dummy_inputs(__lowerCamelCase ) A : Optional[Any] = audioldm_pipe(audio_length_in_s=0.016 , **__lowerCamelCase ) A : str = output.audios[0] assert audio.ndim == 1 assert len(__lowerCamelCase ) / vocoder_sampling_rate == 0.016 A : Dict = audioldm_pipe(audio_length_in_s=0.032 , **__lowerCamelCase ) A : Dict = output.audios[0] assert audio.ndim == 1 assert len(__lowerCamelCase ) / vocoder_sampling_rate == 0.032 def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: A : Any = self.get_dummy_components() A : List[str] = AudioLDMPipeline(**__lowerCamelCase ) A : Any = audioldm_pipe.to(__lowerCamelCase ) audioldm_pipe.set_progress_bar_config(disable=__lowerCamelCase ) A : Tuple = ["hey"] A : Tuple = audioldm_pipe(__lowerCamelCase , num_inference_steps=1 ) A : Optional[int] = output.audios.shape assert audio_shape == (1, 2_56) A : Tuple = audioldm_pipe.vocoder.config config.model_in_dim *= 2 A : Tuple = SpeechTaHifiGan(__lowerCamelCase ).to(__lowerCamelCase ) A : Any = audioldm_pipe(__lowerCamelCase , num_inference_steps=1 ) A : Any = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 2_56) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> 
int: self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[Any]: self._test_inference_batch_single_identical(test_mean_pixel_difference=__lowerCamelCase ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__lowerCamelCase ) @slow class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : int ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : str="cpu" , __lowerCamelCase : int=torch.floataa , __lowerCamelCase : Optional[Any]=0 ) -> Any: A : List[str] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) A : Optional[Any] = np.random.RandomState(__lowerCamelCase ).standard_normal((1, 8, 1_28, 16) ) A : Optional[Any] = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase , dtype=__lowerCamelCase ) A : int = { "prompt": "A hammer hitting a wooden surface", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 2.5, } return inputs def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: A : int = AudioLDMPipeline.from_pretrained("cvssp/audioldm" ) A : List[Any] = audioldm_pipe.to(__lowerCamelCase ) audioldm_pipe.set_progress_bar_config(disable=__lowerCamelCase ) A : Optional[Any] = self.get_inputs(__lowerCamelCase ) A : int = 25 A : Dict = audioldm_pipe(**__lowerCamelCase ).audios[0] assert audio.ndim == 1 assert len(__lowerCamelCase ) == 8_19_20 A : str = audio[7_72_30:7_72_40] A : Tuple = np.array( [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] ) A : Union[str, Any] = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1e-2 def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple: A : List[str] = AudioLDMPipeline.from_pretrained("cvssp/audioldm" ) A : str = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) A : str = audioldm_pipe.to(__lowerCamelCase ) audioldm_pipe.set_progress_bar_config(disable=__lowerCamelCase ) A : List[str] = self.get_inputs(__lowerCamelCase ) A : str = audioldm_pipe(**__lowerCamelCase ).audios[0] assert audio.ndim == 1 assert len(__lowerCamelCase ) == 8_19_20 A : Optional[int] = audio[2_77_80:2_77_90] A : Any = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] ) A : Union[str, Any] = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3e-2
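The hard-coded 2_56 sample counts and audio-length assertions in the tests above all follow from samples = duration * sampling_rate with the dummy vocoder's 16 kHz rate:

# Why the dummy outputs are 256 samples long: duration * sampling_rate.
sampling_rate = 16_000
assert round(0.016 * sampling_rate) == 256
assert round(0.032 * sampling_rate) == 512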
715
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Sequence, Value from .base import TaskTemplate @dataclass(frozen=_A ) class lowerCamelCase_ ( _A ): '''simple docstring''' # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization a__ = field(default="question-answering-extractive" ,metadata={"include_in_asdict_even_if_is_default": True} ) a__ = Features({"question": Value("string" ), "context": Value("string" )} ) a__ = Features( { "answers": Sequence( { "text": Value("string" ), "answer_start": Value("int32" ), } ) } ) a__ = "question" a__ = "context" a__ = "answers" @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict[str, str]: return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
17
0
import fire from utils import calculate_rouge, save_json def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , **_lowerCamelCase ): A : Union[str, Any] = [x.strip() for x in open(_lowerCamelCase ).readlines()] A : Tuple = [x.strip() for x in open(_lowerCamelCase ).readlines()][: len(_lowerCamelCase )] A : List[Any] = calculate_rouge(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) if save_path is not None: save_json(_lowerCamelCase , _lowerCamelCase , indent=_lowerCamelCase ) return metrics # these print nicely if __name__ == "__main__": fire.Fire(calculate_rouge_path)
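Note that len(_lowerCamelCase) in the second list comprehension above has collapsed what is presumably len(pred_lns): targets are truncated to the number of predictions before scoring. A runnable illustration of that read-and-align idiom, with in-memory files standing in for the real paths (the project-local calculate_rouge and save_json helpers are not imported here):

# In-memory illustration of the read-and-align idiom above; real runs pass
# file paths and feed the aligned lists to the project's calculate_rouge.
from io import StringIO

pred_lns = [x.strip() for x in StringIO("hyp one\nhyp two\n").readlines()]
tgt_lns = [x.strip() for x in StringIO("ref one\nref two\nref three\n").readlines()][: len(pred_lns)]
assert tgt_lns == ["ref one", "ref two"]  # extra reference lines are dropped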
716
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class lowerCamelCase_ : '''simple docstring''' def __init__( self : int , __lowerCamelCase : Any , __lowerCamelCase : Dict=3 , __lowerCamelCase : Dict=32 , __lowerCamelCase : Any=3 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : str=[8, 16, 32, 64] , __lowerCamelCase : Dict=[1, 1, 2, 1] , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : str=3 , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase : Tuple=[2, 3, 4] , __lowerCamelCase : Any=1 , ) -> int: A : Optional[int] = parent A : List[str] = batch_size A : Tuple = image_size A : List[str] = num_channels A : List[str] = embeddings_size A : List[str] = hidden_sizes A : str = depths A : Optional[Any] = is_training A : int = use_labels A : Optional[int] = hidden_act A : List[Any] = num_labels A : List[str] = scope A : str = len(__lowerCamelCase ) A : Optional[int] = out_features A : str = out_indices A : Optional[int] = num_groups def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: A : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Optional[int] = None if self.use_labels: A : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels ) A : Tuple = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ) -> Optional[int]: A : Any = BitModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : List[Any] = model(__lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : Dict ) -> Tuple: A : Union[str, Any] = self.num_labels A : List[str] = BitForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : str = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ) -> 
List[Any]: A : Dict = BitBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Optional[Any] = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None A : Optional[Any] = None A : Optional[int] = BitBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Any = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Dict: A : List[str] = self.prepare_config_and_inputs() A , A , A : Tuple = config_and_inputs A : Tuple = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () a__ = ( {"feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: A : Any = BitModelTester(self ) A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: return @unittest.skip(reason="Bit does not output attentions" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: pass @unittest.skip(reason="Bit does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]: pass @unittest.skip(reason="Bit does not support input and output embeddings" ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]: pass def SCREAMING_SNAKE_CASE__ ( self : int ) -> str: A , A : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Dict = model_class(__lowerCamelCase ) A : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Optional[Any] = [*signature.parameters.keys()] A : List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]: A : int = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]: A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Optional[int] = model_class(config=__lowerCamelCase ) for name, module in model.named_modules(): if isinstance(__lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: def check_hidden_states_output(__lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] ): A : Dict = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): A : List[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) A : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A : List[Any] = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() A : Dict = ["preactivation", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: A : Dict = layer_type A : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) @unittest.skip(reason="Bit does not use feedforward chunking" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: pass def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]: A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]: for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Optional[Any] = BitModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]: return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: A : Union[str, Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCamelCase ) A : List[Any] = self.default_image_processor A : List[Any] = prepare_img() A : Tuple = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): A : Union[str, Any] = model(**__lowerCamelCase ) # verify the logits A : str = torch.Size((1, 
10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : Optional[Any] = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) @require_torch class lowerCamelCase_ ( _A ,unittest.TestCase ): '''simple docstring''' a__ = (BitBackbone,) if is_torch_available() else () a__ = BitConfig a__ = False def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: A : Union[str, Any] = BitModelTester(self )
17
0
'''simple docstring''' import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict: A : Any = "ylacombe/bark-small" A : Dict = tempfile.mkdtemp() A : Dict = "en_speaker_1" A : List[str] = "This is a test string" A : Any = "speaker_embeddings_path.json" A : int = "speaker_embeddings" def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , **__lowerCamelCase : Any ) -> Any: return AutoTokenizer.from_pretrained(self.checkpoint , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Tuple: shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int: A : Any = self.get_tokenizer() A : Optional[int] = BarkProcessor(tokenizer=__lowerCamelCase ) processor.save_pretrained(self.tmpdirname ) A : str = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[Any]: A : Any = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) A : Optional[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) A : Optional[int] = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> str: A : Any = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) A : Tuple = 35 A : Dict = 2 A : Dict = 8 A : str = { "semantic_prompt": np.ones(__lowerCamelCase ), "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ), "fine_prompt": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset A : int = processor(text=self.input_string , voice_preset=__lowerCamelCase ) A : List[Any] = inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__lowerCamelCase , np.array([] ) ).tolist() ) # test loading voice preset from npz file A : Union[str, Any] = os.path.join(self.tmpdirname , "file.npz" ) np.savez(__lowerCamelCase , **__lowerCamelCase ) A : Optional[int] = processor(text=self.input_string , voice_preset=__lowerCamelCase ) A : Union[str, Any] = inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__lowerCamelCase , np.array([] ) ).tolist() ) # test loading voice preset from the hub A : List[Any] = processor(text=self.input_string , voice_preset=self.voice_preset ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Tuple: A : str = self.get_tokenizer() A : Optional[int] = BarkProcessor(tokenizer=__lowerCamelCase ) A : List[Any] = processor(text=self.input_string ) A : List[Any] = tokenizer( self.input_string , padding="max_length" , max_length=2_56 , add_special_tokens=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , ) 
for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
717
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: A : Union[str, Any] = XLMRobertaModel.from_pretrained("xlm-roberta-base" ) A : Tuple = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] ) # The dog is cute and lives in the garden house A : Tuple = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim A : List[str] = torch.tensor( [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): A : Tuple = model(__lowerCamelCase )["last_hidden_state"].detach() self.assertEqual(output.shape , __lowerCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCamelCase , atol=1e-3 ) ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int: A : str = XLMRobertaModel.from_pretrained("xlm-roberta-large" ) A : List[Any] = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] ) # The dog is cute and lives in the garden house A : Optional[Any] = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim A : List[Any] = torch.tensor( [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): A : Optional[int] = model(__lowerCamelCase )["last_hidden_state"].detach() self.assertEqual(output.shape , __lowerCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCamelCase , atol=1e-3 ) )
17
0
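# A minimal sketch of the feature-extraction pattern the integration test above
# exercises: load a pretrained encoder with its tokenizer, run a forward pass
# without gradients, and inspect the last hidden state. The checkpoint name is
# the one used by the test; the input sentence is illustrative.
import torch
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
model = AutoModel.from_pretrained("xlm-roberta-base")
inputs = tokenizer("The dog is cute and lives in the garden house", return_tensors="pt")
with torch.no_grad():
    last_hidden = model(**inputs).last_hidden_state  # (batch, seq_len, hidden_dim)
print(last_hidden.shape)  # e.g. torch.Size([1, 12, 768]) for the base model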
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed __SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""): from run_translation import main # noqa set_seed(42) __SCREAMING_SNAKE_CASE = """sshleifer/student_marian_en_ro_6_1""" __SCREAMING_SNAKE_CASE = """sshleifer/tiny-mbart""" @require_torch class lowerCamelCase_ ( _A ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : str=None , __lowerCamelCase : List[str]=True , __lowerCamelCase : str=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Optional[int]=True , ) -> Dict: A : str = self.run_trainer( eval_steps=1 , max_len=12 , model_name=__lowerCamelCase , num_train_epochs=1 , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , predict_with_generate=__lowerCamelCase , do_train=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , ) A : Dict = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history if not do_eval: return A : List[Any] = [log for log in logs if "eval_loss" in log.keys()] A : Any = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats A : List[str] = eval_metrics[-1] assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase ) assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: self.run_seqaseq_quick() @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: self.run_seqaseq_quick(distributed=__lowerCamelCase ) @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str: self.run_seqaseq_quick(distributed=__lowerCamelCase ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]: self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple" ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> str: self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple --fp16" ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=__lowerCamelCase ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: self.run_seqaseq_quick( distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , 
predict_with_generate=__lowerCamelCase ) @require_apex @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict: # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" ) @parameterized.expand(["base", "low", "high", "mixed"] ) @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : List[str] ) -> Tuple: # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout A : Dict = { # test with the default log_level - should be info and thus log info once "base": {"extra_args_str": "", "n_matches": 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1}, # test with high log_level and log_level_replica - should be quiet on all processes "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0}, } A : List[str] = experiments[experiment_id] A : Union[str, Any] = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False} A : Union[str, Any] = "Running training" with CaptureStderr() as cl: self.run_seqaseq_quick(**__lowerCamelCase , extra_args_str=data["extra_args_str"] ) A : Dict = len(re.findall(__lowerCamelCase , cl.err ) ) self.assertEqual(__lowerCamelCase , data["n_matches"] ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: A : int = self.run_trainer( eval_steps=2 , max_len=1_28 , model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=10 , distributed=__lowerCamelCase , ) # Check metrics A : str = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history A : Dict = [log for log in logs if "eval_loss" in log.keys()] A : Dict = eval_metrics[0] A : int = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase ) # test if do_predict saves generations and metrics A : Optional[Any] = os.listdir(__lowerCamelCase ) A : Any = {os.path.basename(__lowerCamelCase ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]: from transformers.training_args import OptimizerNames def train_and_return_metrics(__lowerCamelCase : str ) -> Tuple[int, float]: A : Optional[int] = "--skip_memory_metrics 0" A : str = self.run_trainer( max_len=1_28 , 
model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=1 , optim=__lowerCamelCase , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , n_gpus_to_use=1 , ) # Check metrics A : Union[str, Any] = TrainerState.load_from_json(Path(__lowerCamelCase , "trainer_state.json" ) ).log_history A : str = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 ) A : List[Any] = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 ) A : int = logs[0]["train_loss"] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss A : int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) A : Optional[int] = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) A : Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb A : Dict = gpu_peak_mem_orig + gpu_alloc_mem_orig A : Dict = gpu_peak_mem_bnb + gpu_alloc_mem_bnb A : int = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings A : Tuple = 1_20 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( __lowerCamelCase , __lowerCamelCase , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got" F""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and""" F""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , ) self.assertGreater( __lowerCamelCase , __lowerCamelCase , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got" F""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and""" F""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , ) self.assertEqual( __lowerCamelCase , __lowerCamelCase , F"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" ) def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : float = 3e-3 , __lowerCamelCase : str = "adafactor" , __lowerCamelCase : bool = False , __lowerCamelCase : str = None , __lowerCamelCase : int = 0 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : int = None , ) -> List[str]: A : Optional[int] = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro" A : Optional[int] = self.get_auto_remove_tmp_dir() A : int = F""" --model_name_or_path {model_name} --train_file {data_dir}/train.json --validation_file 
{data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(__lowerCamelCase )} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(__lowerCamelCase )} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX """.split() A : Optional[Any] = F""" --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(__lowerCamelCase )} """.split() A : Optional[Any] = "\n --do_predict\n ".split() A : Optional[int] = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F"""--optim {optim}""".split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: A : Dict = get_gpu_count() A : Any = get_torch_dist_unique_port() A : Optional[Any] = F""" -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py """.split() A : Any = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__lowerCamelCase , env=self.get_env() ) else: A : List[Any] = ["run_translation.py"] + args with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ): main() return output_dir
718
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase_ : '''simple docstring''' def __init__( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Optional[int]=10 , __lowerCamelCase : Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase : Tuple=[1, 1, 2, 1] , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , ) -> str: A : Optional[Any] = parent A : Optional[int] = batch_size A : List[str] = image_size A : List[str] = num_channels A : Tuple = embeddings_size A : Optional[int] = hidden_sizes A : Dict = depths A : Optional[int] = is_training A : List[str] = use_labels A : List[Any] = hidden_act A : Optional[int] = num_labels A : int = scope A : List[Any] = len(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[Any]: A : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Optional[Any] = None if self.use_labels: A : Any = ids_tensor([self.batch_size] , self.num_labels ) A : List[Any] = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Tuple: A : List[str] = TFRegNetModel(config=__lowerCamelCase ) A : str = model(__lowerCamelCase , training=__lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Tuple ) -> List[str]: A : List[Any] = self.num_labels A : int = TFRegNetForImageClassification(__lowerCamelCase ) A : str = model(__lowerCamelCase , labels=__lowerCamelCase , training=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: A : Any = self.prepare_config_and_inputs() A , A , A : str = config_and_inputs A : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () a__ = ( {"feature-extraction": TFRegNetModel, 
"image-classification": TFRegNetForImageClassification} if is_tf_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: A : Optional[Any] = TFRegNetModelTester(self ) A : int = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Tuple: return @unittest.skip(reason="RegNet does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) @slow def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: super().test_keras_fit() @unittest.skip(reason="RegNet does not support input and output embeddings" ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Union[str, Any]: pass def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple: A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Union[str, Any] = model_class(__lowerCamelCase ) A : int = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Union[str, Any] = [*signature.parameters.keys()] A : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple: A : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: def check_hidden_states_output(__lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ): A : int = model_class(__lowerCamelCase ) A : int = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) , training=__lowerCamelCase ) A : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A : Dict = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) A , A : int = self.model_tester.prepare_config_and_inputs_for_common() A : str = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: A : List[str] = layer_type A : List[Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: A , A : Tuple = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(__lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]={} ): A : Optional[int] = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ) A : int = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ).to_tuple() def recursive_check(__lowerCamelCase : List[str] , __lowerCamelCase : Any ): if isinstance(__lowerCamelCase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in 
zip(__lowerCamelCase , __lowerCamelCase ): recursive_check(__lowerCamelCase , __lowerCamelCase ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(__lowerCamelCase , __lowerCamelCase ) ) , msg=( "Tuple and dict output are not equal. Difference:" F""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ) , ) recursive_check(__lowerCamelCase , __lowerCamelCase ) for model_class in self.all_model_classes: A : Tuple = model_class(__lowerCamelCase ) A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} ) A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: A : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Union[str, Any] = TFRegNetModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]: A : List[Any] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) A : Optional[int] = self.default_image_processor A : List[Any] = prepare_img() A : str = image_processor(images=__lowerCamelCase , return_tensors="tf" ) # forward pass A : List[Any] = model(**__lowerCamelCase , training=__lowerCamelCase ) # verify the logits A : Dict = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : Optional[Any] = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 )
17
0
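# A hedged sketch of the multi-GPU launch pattern the trainer test above builds
# by hand: spawn one process per GPU with torch.distributed.run and pass the
# training arguments through. The script path, GPU count, and output directory
# are placeholders, not the test's actual fixtures.
import subprocess
import sys

n_gpus = 2  # assumed GPU count
cmd = [
    sys.executable,
    "-m",
    "torch.distributed.run",
    f"--nproc_per_node={n_gpus}",
    "run_translation.py",  # hypothetical script path
    "--output_dir",
    "/tmp/translation_out",
]
subprocess.run(cmd, check=True)  # raises CalledProcessError on a non-zero exit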
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} __SCREAMING_SNAKE_CASE = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } __SCREAMING_SNAKE_CASE = { """allenai/led-base-16384""": 16384, } class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = LEDTokenizer a__ = ["input_ids", "attention_mask"] def __init__( self : int , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : str="replace" , __lowerCamelCase : Optional[int]="<s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Dict="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : Union[str, Any]="<pad>" , __lowerCamelCase : Dict="<mask>" , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : str=True , **__lowerCamelCase : Union[str, Any] , ) -> Optional[int]: super().__init__( __lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase , **__lowerCamelCase , ) A : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space: A : Any = getattr(__lowerCamelCase , pre_tok_state.pop("type" ) ) A : Any = add_prefix_space A : Tuple = pre_tok_class(**__lowerCamelCase ) A : str = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` A : List[str] = "post_processor" A : Union[str, Any] = getattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) if tokenizer_component_instance: A : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A : Union[str, Any] = tuple(state["sep"] ) if "cls" in state: A : str = tuple(state["cls"] ) A : int = False if state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space: A : List[Any] = add_prefix_space A : Dict = True if state.get("trim_offsets" , __lowerCamelCase ) != trim_offsets: A : Dict = trim_offsets A : str = True if changes_to_apply: A : int = getattr(__lowerCamelCase , state.pop("type" ) ) A : Dict = component_class(**__lowerCamelCase ) setattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) @property # Copied from 
transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Any ) -> Dict: A : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else value A : Tuple = value def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[str] ) -> BatchEncoding: A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , *__lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ) -> BatchEncoding: A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]: A : Optional[Any] = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase ) return tuple(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any]=None ) -> List[str]: A : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: A : str = [self.sep_token_id] A : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ) -> dict: A : Dict = super()._pad( encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: A : List[Any] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: A : Optional[int] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
A : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase ) if needs_to_be_padded: A : Any = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` A : Tuple = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": A : Tuple = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
719
import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = (PNDMScheduler,) a__ = (("num_inference_steps", 50),) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , **__lowerCamelCase : str ) -> Optional[Any]: A : Union[str, Any] = { "num_train_timesteps": 10_00, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**__lowerCamelCase ) return config def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : List[str]=0 , **__lowerCamelCase : Any ) -> Tuple: A : Dict = dict(self.forward_default_kwargs ) A : Dict = kwargs.pop("num_inference_steps" , __lowerCamelCase ) A : Union[str, Any] = self.dummy_sample A : List[Any] = 0.1 * sample A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A : Any = self.get_scheduler_config(**__lowerCamelCase ) A : int = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals A : Tuple = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCamelCase ) A : Dict = scheduler_class.from_pretrained(__lowerCamelCase ) new_scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals A : Tuple = dummy_past_residuals[:] A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : str = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A : int = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[str] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]: pass def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Optional[Any]=0 , **__lowerCamelCase : Tuple ) -> str: A : List[str] = dict(self.forward_default_kwargs ) A : Optional[int] = kwargs.pop("num_inference_steps" , __lowerCamelCase ) A : List[str] = self.dummy_sample A : Any = 0.1 * sample A : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A : Tuple = self.get_scheduler_config() A : Optional[int] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals (must be after setting timesteps) A : Optional[int] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCamelCase ) A : str = scheduler_class.from_pretrained(__lowerCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residual (must be after setting timesteps) A : Optional[Any] = dummy_past_residuals[:] A : Union[str, Any] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : Dict = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A : 
Union[str, Any] = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[Any] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : Tuple , **__lowerCamelCase : Any ) -> Union[str, Any]: A : Optional[Any] = self.scheduler_classes[0] A : List[Any] = self.get_scheduler_config(**__lowerCamelCase ) A : str = scheduler_class(**__lowerCamelCase ) A : List[str] = 10 A : Union[str, Any] = self.dummy_model() A : int = self.dummy_sample_deter scheduler.set_timesteps(__lowerCamelCase ) for i, t in enumerate(scheduler.prk_timesteps ): A : Optional[int] = model(__lowerCamelCase , __lowerCamelCase ) A : Optional[int] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): A : Tuple = model(__lowerCamelCase , __lowerCamelCase ) A : Tuple = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample return sample def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any: A : Union[str, Any] = dict(self.forward_default_kwargs ) A : Union[str, Any] = kwargs.pop("num_inference_steps" , __lowerCamelCase ) for scheduler_class in self.scheduler_classes: A : Dict = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) A : List[Any] = self.dummy_sample A : List[Any] = 0.1 * sample if num_inference_steps is not None and hasattr(__lowerCamelCase , "set_timesteps" ): scheduler.set_timesteps(__lowerCamelCase ) elif num_inference_steps is not None and not hasattr(__lowerCamelCase , "set_timesteps" ): A : List[str] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] A : Tuple = dummy_past_residuals[:] A : Dict = scheduler.step_prk(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[Any] = scheduler.step_prk(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) A : Any = scheduler.step_plms(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : str = scheduler.step_plms(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple: for timesteps in [1_00, 10_00]: self.check_over_configs(num_train_timesteps=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]: for steps_offset in [0, 1]: self.check_over_configs(steps_offset=__lowerCamelCase ) A : Dict = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config(steps_offset=1 ) A : Optional[int] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=__lowerCamelCase , beta_end=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : 
Optional[int] ) -> Union[str, Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]: for t in [1, 5, 10]: self.check_over_forward(time_step=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int: for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ): self.check_over_forward(num_inference_steps=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any: # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 A : str = 27 for scheduler_class in self.scheduler_classes: A : Tuple = self.dummy_sample A : List[Any] = 0.1 * sample A : List[Any] = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : str ) -> int: with self.assertRaises(__lowerCamelCase ): A : Union[str, Any] = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict: A : Optional[Any] = self.full_loop() A : Tuple = torch.sum(torch.abs(__lowerCamelCase ) ) A : Optional[int] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 198.1318 ) < 1e-2 assert abs(result_mean.item() - 0.2580 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any: A : Any = self.full_loop(prediction_type="v_prediction" ) A : Union[str, Any] = torch.sum(torch.abs(__lowerCamelCase ) ) A : List[str] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 67.3986 ) < 1e-2 assert abs(result_mean.item() - 0.0878 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]: # We specify different beta, so that the first alpha is 0.99 A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 ) A : Dict = torch.sum(torch.abs(__lowerCamelCase ) ) A : Any = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 230.0399 ) < 1e-2 assert abs(result_mean.item() - 0.2995 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any: # We specify different beta, so that the first alpha is 0.99 A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 ) A : List[Any] = torch.sum(torch.abs(__lowerCamelCase ) ) A : Optional[Any] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 186.9482 ) < 1e-2 assert abs(result_mean.item() - 0.2434 ) < 1e-3
17
0
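# A self-contained sketch of the padding rule the LED tokenizer above applies to
# `global_attention_mask`: pad with -1 (meaning local attention, as opposed to
# 0 = "do not attend") until the mask matches the padded input length. Values
# are illustrative.
def pad_global_attention_mask(mask, target_len, padding_side="right"):
    difference = target_len - len(mask)
    if difference <= 0:
        return mask
    if padding_side == "right":
        return mask + [-1] * difference
    return [-1] * difference + mask

print(pad_global_attention_mask([1, 0, 0], 5))  # [1, 0, 0, -1, -1]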
__SCREAMING_SNAKE_CASE = {} def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): # if we are absent twice, or late 3 consecutive days, # no further prize strings are possible if late == 3 or absent == 2: return 0 # if we have no days left, and have not failed any other rules, # we have a prize string if days == 0: return 1 # No easy solution, so now we need to do the recursive calculation # First, check if the combination is already in the cache, and # if yes, return the stored value from there since we already # know the number of possible prize strings from this point on A : Optional[Any] = (days, absent, late) if key in cache: return cache[key] # now we calculate the three possible ways that can unfold from # this point on, depending on our attendance today # 1) if we are late (but not absent), the "absent" counter stays as # it is, but the "late" counter increases by one A : List[Any] = _calculate(days - 1 , _lowerCamelCase , late + 1 ) # 2) if we are absent, the "absent" counter increases by 1, and the # "late" counter resets to 0 A : int = _calculate(days - 1 , absent + 1 , 0 ) # 3) if we are on time, this resets the "late" counter and keeps the # absent counter A : Union[str, Any] = _calculate(days - 1 , _lowerCamelCase , 0 ) A : Any = state_late + state_absent + state_ontime A : Optional[Any] = prizestrings return prizestrings def UpperCAmelCase ( _lowerCamelCase = 30 ): return _calculate(_lowerCamelCase , absent=0 , late=0 ) if __name__ == "__main__": print(solution())
720
from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function __SCREAMING_SNAKE_CASE = 1.0_5_4_5_7_1_8_1_7e-3_4 # unit of ℏ : J * s __SCREAMING_SNAKE_CASE = 3e8 # unit of c : m * s^-1 def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if (force, area, distance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if force < 0: raise ValueError("Magnitude of force can not be negative" ) if distance < 0: raise ValueError("Distance can not be negative" ) if area < 0: raise ValueError("Area can not be negative" ) if force == 0: A : int = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 240 * (distance) ** 4 ) return {"force": force} elif area == 0: A : Tuple = (240 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: A : Dict = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError("One and only one argument must be 0" ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
17
0
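# An equivalent formulation of the memoized prize-string recursion above using
# functools.lru_cache in place of the hand-rolled dict cache; the recurrence
# (fewer than 3 consecutive late days, fewer than 2 absences) is unchanged.
from functools import lru_cache

@lru_cache(maxsize=None)
def prize_strings(days, absent=0, late=0):
    if late == 3 or absent == 2:
        return 0
    if days == 0:
        return 1
    return (
        prize_strings(days - 1, absent, late + 1)  # late today
        + prize_strings(days - 1, absent + 1, 0)   # absent today
        + prize_strings(days - 1, absent, 0)       # on time today
    )

print(prize_strings(30))  # expected: 1918080160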
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import evaluate import numpy as np import torch from datasets import load_dataset from PIL import Image from torchvision.transforms import ( CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) import transformers from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForImageClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version __SCREAMING_SNAKE_CASE = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""") __SCREAMING_SNAKE_CASE = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys()) __SCREAMING_SNAKE_CASE = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def UpperCAmelCase ( _lowerCamelCase ): with open(_lowerCamelCase , "rb" ) as f: A : str = Image.open(_lowerCamelCase ) return im.convert("RGB" ) @dataclass class lowerCamelCase_ : '''simple docstring''' a__ = field( default=_A ,metadata={ "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)." } ,) a__ = field( default=_A ,metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) a__ = field(default=_A ,metadata={"help": "A folder containing the training data."} ) a__ = field(default=_A ,metadata={"help": "A folder containing the validation data."} ) a__ = field( default=0.15 ,metadata={"help": "Percent to split off of train for validation."} ) a__ = field( default=_A ,metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } ,) a__ = field( default=_A ,metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } ,) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]: if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None): raise ValueError( "You must specify either a dataset name from the hub or a train and/or validation directory." ) @dataclass class lowerCamelCase_ : '''simple docstring''' a__ = field( default="google/vit-base-patch16-224-in21k" ,metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ,) a__ = field( default=_A ,metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(_A )} ,) a__ = field( default=_A ,metadata={"help": "Pretrained config name or path if not the same as model_name"} ) a__ = field( default=_A ,metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) a__ = field( default="main" ,metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} ,) a__ = field(default=_A ,metadata={"help": "Name or path of preprocessor config."} ) a__ = field( default=_A ,metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." 
) } ,) a__ = field( default=_A ,metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} ,) def UpperCAmelCase ( _lowerCamelCase ): A : Dict = torch.stack([example["pixel_values"] for example in examples] ) A : Union[str, Any] = torch.tensor([example["labels"] for example in examples] ) return {"pixel_values": pixel_values, "labels": labels} def UpperCAmelCase ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. A : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. A : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: A : Tuple = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_image_classification" , _lowerCamelCase , _lowerCamelCase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() A : int = training_args.get_process_log_level() logger.setLevel(_lowerCamelCase ) transformers.utils.logging.set_verbosity(_lowerCamelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. A : Optional[int] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: A : List[str] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Initialize our dataset and prepare it for the 'image-classification' task. 
if data_args.dataset_name is not None: A : Union[str, Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , ) else: A : Union[str, Any] = {} if data_args.train_dir is not None: A : Union[str, Any] = os.path.join(data_args.train_dir , "**" ) if data_args.validation_dir is not None: A : Optional[Any] = os.path.join(data_args.validation_dir , "**" ) A : Any = load_dataset( "imagefolder" , data_files=_lowerCamelCase , cache_dir=model_args.cache_dir , task="image-classification" , ) # If we don't have a validation split, split off a percentage of train as validation. A : Union[str, Any] = None if "validation" in dataset.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _lowerCamelCase ) and data_args.train_val_split > 0.0: A : int = dataset["train"].train_test_split(data_args.train_val_split ) A : Tuple = split["train"] A : List[Any] = split["test"] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. A : str = dataset["train"].features["labels"].names A : int = {}, {} for i, label in enumerate(_lowerCamelCase ): A : Union[str, Any] = str(_lowerCamelCase ) A : Dict = label # Load the accuracy metric from the datasets package A : Any = evaluate.load("accuracy" ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(_lowerCamelCase ): return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids ) A : str = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(_lowerCamelCase ) , labelaid=_lowerCamelCase , idalabel=_lowerCamelCase , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) A : str = AutoModelForImageClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) A : Union[str, Any] = AutoImageProcessor.from_pretrained( model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Define torchvision transforms to be applied to each image. 
if "shortest_edge" in image_processor.size: A : Union[str, Any] = image_processor.size["shortest_edge"] else: A : int = (image_processor.size["height"], image_processor.size["width"]) A : int = Normalize(mean=image_processor.image_mean , std=image_processor.image_std ) A : Optional[Any] = Compose( [ RandomResizedCrop(_lowerCamelCase ), RandomHorizontalFlip(), ToTensor(), normalize, ] ) A : List[str] = Compose( [ Resize(_lowerCamelCase ), CenterCrop(_lowerCamelCase ), ToTensor(), normalize, ] ) def train_transforms(_lowerCamelCase ): A : Union[str, Any] = [ _train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"] ] return example_batch def val_transforms(_lowerCamelCase ): A : Tuple = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]] return example_batch if training_args.do_train: if "train" not in dataset: raise ValueError("--do_train requires a train dataset" ) if data_args.max_train_samples is not None: A : List[str] = ( dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms dataset["train"].set_transform(_lowerCamelCase ) if training_args.do_eval: if "validation" not in dataset: raise ValueError("--do_eval requires a validation dataset" ) if data_args.max_eval_samples is not None: A : Tuple = ( dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms dataset["validation"].set_transform(_lowerCamelCase ) # Initalize our trainer A : Optional[int] = Trainer( model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=_lowerCamelCase , tokenizer=_lowerCamelCase , data_collator=_lowerCamelCase , ) # Training if training_args.do_train: A : int = None if training_args.resume_from_checkpoint is not None: A : Union[str, Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: A : int = last_checkpoint A : int = trainer.train(resume_from_checkpoint=_lowerCamelCase ) trainer.save_model() trainer.log_metrics("train" , train_result.metrics ) trainer.save_metrics("train" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: A : str = trainer.evaluate() trainer.log_metrics("eval" , _lowerCamelCase ) trainer.save_metrics("eval" , _lowerCamelCase ) # Write model card and (optionally) push to hub A : Any = { "finetuned_from": model_args.model_name_or_path, "tasks": "image-classification", "dataset": data_args.dataset_name, "tags": ["image-classification", "vision"], } if training_args.push_to_hub: trainer.push_to_hub(**_lowerCamelCase ) else: trainer.create_model_card(**_lowerCamelCase ) if __name__ == "__main__": main()
721
import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)


def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
    A : Optional[Any] = RobertaPreLayerNormConfig.from_pretrained( _lowerCamelCase , architectures=["RobertaPreLayerNormForMaskedLM"] )
    # convert state_dict
    A : List[Any] = torch.load(hf_hub_download(repo_id=_lowerCamelCase , filename="pytorch_model.bin" ) )
    A : Union[str, Any] = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta." ):
            A : int = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
            continue
        A : Any = tensor_value
    A : Optional[int] = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=_lowerCamelCase , config=_lowerCamelCase , state_dict=_lowerCamelCase )
    model.save_pretrained(_lowerCamelCase )
    # convert tokenizer
    A : Optional[Any] = AutoTokenizer.from_pretrained(_lowerCamelCase )
    tokenizer.save_pretrained(_lowerCamelCase )


if __name__ == "__main__":
    __SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--checkpoint-repo""",
        default=None,
        type=str,
        required=True,
        help="""Path to the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    __SCREAMING_SNAKE_CASE = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
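A toy, dependency-light illustration of the renaming loop above, using a fabricated two-entry state dict instead of a hub download; the layer names are made up for the example.

import torch

original = {
    "roberta.encoder.layer.0.attention.self.query.weight": torch.zeros(2, 2),
    "roberta.encoder.layer.0.attention.self.LayerNorm.weight": torch.ones(2),  # unused upstream weight
}
converted = {}
for tensor_key, tensor_value in original.items():
    if tensor_key.startswith("roberta."):
        tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta."):]
    if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
        continue  # dropped, as in the script
    converted[tensor_key] = tensor_value
print(sorted(converted))  # only the renamed query weight survives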
17
0
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } __SCREAMING_SNAKE_CASE = { """b0""": { """hidden_dim""": 1280, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 224, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 1280, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 240, """dropout_rate""": 0.2, """dw_padding""": [16], }, """b2""": { """hidden_dim""": 1408, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 260, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 16], }, """b3""": { """hidden_dim""": 1536, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 300, """dropout_rate""": 0.3, """dw_padding""": [5, 18], }, """b4""": { """hidden_dim""": 1792, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 380, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 2048, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 456, """dropout_rate""": 0.4, """dw_padding""": [13, 27], }, """b6""": { """hidden_dim""": 2304, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 528, """dropout_rate""": 0.5, """dw_padding""": [31], }, """b7""": { """hidden_dim""": 2560, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 600, """dropout_rate""": 0.5, """dw_padding""": [18], }, } def UpperCAmelCase ( _lowerCamelCase ): A : List[str] = EfficientNetConfig() A : Optional[Any] = CONFIG_MAP[model_name]["hidden_dim"] A : Tuple = CONFIG_MAP[model_name]["width_coef"] A : List[Any] = CONFIG_MAP[model_name]["depth_coef"] A : Optional[Any] = CONFIG_MAP[model_name]["image_size"] A : int = CONFIG_MAP[model_name]["dropout_rate"] A : Optional[Any] = CONFIG_MAP[model_name]["dw_padding"] A : List[Any] = "huggingface/label-files" A : int = "imagenet-1k-id2label.json" A : Dict = 1000 A : int = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) ) A : Dict = {int(_lowerCamelCase ): v for k, v in idalabel.items()} A : Dict = idalabel A : Optional[Any] = {v: k for k, v in idalabel.items()} return config def UpperCAmelCase ( ): A : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" A : Dict = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) return im def UpperCAmelCase ( _lowerCamelCase ): A : Optional[int] = CONFIG_MAP[model_name]["image_size"] A : str = EfficientNetImageProcessor( size={"height": size, "width": size} , image_mean=[0.4_85, 0.4_56, 0.4_06] , image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63] , do_center_crop=_lowerCamelCase , ) return preprocessor def UpperCAmelCase ( _lowerCamelCase ): A : Any = [v.split("_" )[0].split("block" )[1] for v in original_param_names if 
v.startswith("block" )] A : List[Any] = sorted(set(_lowerCamelCase ) ) A : Optional[int] = len(_lowerCamelCase ) A : List[str] = {b: str(_lowerCamelCase ) for b, i in zip(_lowerCamelCase , range(_lowerCamelCase ) )} A : Tuple = [] rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") ) rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") ) rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") ) rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") ) rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") ) for b in block_names: A : List[Any] = block_name_mapping[b] rename_keys.append((f"""block{b}_expand_conv/kernel:0""", f"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") ) rename_keys.append((f"""block{b}_expand_bn/gamma:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") ) rename_keys.append((f"""block{b}_expand_bn/beta:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") ) rename_keys.append( (f"""block{b}_expand_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") ) rename_keys.append( (f"""block{b}_expand_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") ) rename_keys.append( (f"""block{b}_dwconv/depthwise_kernel:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") ) rename_keys.append((f"""block{b}_bn/gamma:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") ) rename_keys.append((f"""block{b}_bn/beta:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") ) rename_keys.append( (f"""block{b}_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") ) rename_keys.append( (f"""block{b}_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") ) rename_keys.append((f"""block{b}_se_reduce/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") ) rename_keys.append((f"""block{b}_se_reduce/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") ) rename_keys.append((f"""block{b}_se_expand/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") ) rename_keys.append((f"""block{b}_se_expand/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") ) rename_keys.append( (f"""block{b}_project_conv/kernel:0""", f"""encoder.blocks.{hf_b}.projection.project_conv.weight""") ) rename_keys.append((f"""block{b}_project_bn/gamma:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.weight""") ) rename_keys.append((f"""block{b}_project_bn/beta:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.bias""") ) rename_keys.append( (f"""block{b}_project_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") ) rename_keys.append( (f"""block{b}_project_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") ) rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") ) rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") ) rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") ) rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") ) rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") ) A : List[Any] = {} for item in rename_keys: if item[0] in original_param_names: A : Tuple = "efficientnet." 
+ item[1] A : Any = "classifier.weight" A : Union[str, Any] = "classifier.bias" return key_mapping def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): for key, value in tf_params.items(): if "normalization" in key: continue A : List[str] = key_mapping[key] if "_conv" in key and "kernel" in key: A : List[str] = torch.from_numpy(_lowerCamelCase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: A : List[Any] = torch.from_numpy(_lowerCamelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: A : List[str] = torch.from_numpy(np.transpose(_lowerCamelCase ) ) else: A : Union[str, Any] = torch.from_numpy(_lowerCamelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_lowerCamelCase ) @torch.no_grad() def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): A : int = model_classes[model_name]( include_top=_lowerCamelCase , weights="imagenet" , input_tensor=_lowerCamelCase , input_shape=_lowerCamelCase , pooling=_lowerCamelCase , classes=1000 , classifier_activation="softmax" , ) A : int = original_model.trainable_variables A : int = original_model.non_trainable_variables A : Optional[int] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: A : Optional[int] = param.numpy() A : Optional[int] = list(tf_params.keys() ) # Load HuggingFace model A : Optional[int] = get_efficientnet_config(_lowerCamelCase ) A : str = EfficientNetForImageClassification(_lowerCamelCase ).eval() A : Optional[int] = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("Converting parameters..." ) A : Optional[Any] = rename_keys(_lowerCamelCase ) replace_params(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Initialize preprocessor and preprocess input image A : Optional[Any] = convert_image_processor(_lowerCamelCase ) A : Any = preprocessor(images=prepare_img() , return_tensors="pt" ) # HF model inference hf_model.eval() with torch.no_grad(): A : List[Any] = hf_model(**_lowerCamelCase ) A : Optional[Any] = outputs.logits.detach().numpy() # Original model inference A : Optional[Any] = False A : Any = CONFIG_MAP[model_name]["image_size"] A : Dict = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) A : Tuple = image.img_to_array(_lowerCamelCase ) A : Tuple = np.expand_dims(_lowerCamelCase , axis=0 ) A : Optional[Any] = original_model.predict(_lowerCamelCase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 ), "The predicted logits are not the same." print("Model outputs match!" 
) if save_model: # Create folder to save model if not os.path.isdir(_lowerCamelCase ): os.mkdir(_lowerCamelCase ) # Save converted model and image processor hf_model.save_pretrained(_lowerCamelCase ) preprocessor.save_pretrained(_lowerCamelCase ) if push_to_hub: # Push model and image processor to hub print(f"""Pushing converted {model_name} to the hub...""" ) A : int = f"""efficientnet-{model_name}""" preprocessor.push_to_hub(_lowerCamelCase ) hf_model.push_to_hub(_lowerCamelCase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") __SCREAMING_SNAKE_CASE = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
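The permutes in the parameter-replacement function encode a layout conversion worth seeing in isolation: a minimal sketch with dummy zero kernels (shapes chosen arbitrarily) showing how TF's channels-last conv kernels and depthwise kernels map to the layouts PyTorch expects.

import numpy as np
import torch

tf_kernel = np.zeros((3, 3, 16, 32), dtype=np.float32)       # TF conv: (H, W, in, out)
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)  # PyTorch: (out, in, H, W)
print(tuple(pt_kernel.shape))  # (32, 16, 3, 3)

tf_dw = np.zeros((3, 3, 16, 1), dtype=np.float32)    # TF depthwise: (H, W, C, 1)
pt_dw = torch.from_numpy(tf_dw).permute(2, 3, 0, 1)  # PyTorch depthwise: (C, 1, H, W)
print(tuple(pt_dw.shape))  # (16, 1, 3, 3)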
700
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __SCREAMING_SNAKE_CASE = { """configuration_instructblip""": [ """INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """InstructBlipConfig""", """InstructBlipQFormerConfig""", """InstructBlipVisionConfig""", ], """processing_instructblip""": ["""InstructBlipProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """InstructBlipQFormerModel""", """InstructBlipPreTrainedModel""", """InstructBlipForConditionalGeneration""", """InstructBlipVisionModel""", ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys __SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
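The _LazyModule used above defers the heavy model imports until a symbol is first touched. A stripped-down sketch of that idea, not the transformers implementation, using only the standard library:

import importlib

class LazyNamespace:
    def __init__(self, import_structure):
        self._import_structure = import_structure  # {module_name: [symbol, ...]}

    def __getattr__(self, name):
        # Only runs when normal attribute lookup fails: import on first access.
        for module_name, symbols in self._import_structure.items():
            if name in symbols:
                module = importlib.import_module(module_name)
                return getattr(module, name)
        raise AttributeError(name)

ns = LazyNamespace({"json": ["dumps", "loads"]})
print(ns.dumps({"lazy": True}))  # the json module is imported only here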
17
0
import argparse import torch from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert from transformers.utils import logging logging.set_verbosity_info() def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): # Initialise PyTorch model A : Optional[Any] = MobileBertConfig.from_json_file(_lowerCamelCase ) print(f"""Building PyTorch model from configuration: {config}""" ) A : Union[str, Any] = MobileBertForPreTraining(_lowerCamelCase ) # Load weights from tf checkpoint A : Union[str, Any] = load_tf_weights_in_mobilebert(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , _lowerCamelCase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--mobilebert_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained MobileBERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) __SCREAMING_SNAKE_CASE = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
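The script's final step is a plain torch.save of the state dict; a minimal round-trip sketch with a tiny stand-in module (torch only; writes pytorch_model.bin to the working directory):

import torch

model = torch.nn.Linear(4, 2)  # tiny stand-in for the converted model
torch.save(model.state_dict(), "pytorch_model.bin")

restored = torch.nn.Linear(4, 2)
restored.load_state_dict(torch.load("pytorch_model.bin"))
print(all(torch.equal(a, b) for a, b in
          zip(model.state_dict().values(), restored.state_dict().values())))  # True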
701
import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Optional[int]: self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) ) for a, b in zip(__lowerCamelCase , __lowerCamelCase ): self.assertAlmostEqual(__lowerCamelCase , __lowerCamelCase , delta=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]: A : List[Any] = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(__lowerCamelCase ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: A : Union[str, Any] = None ops.enable_eager_execution_internal() A : Tuple = tf.config.list_physical_devices("CPU" ) if len(__lowerCamelCase ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) A : Dict = tf.config.list_logical_devices(device_type="CPU" ) A : List[str] = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): A : Optional[int] = GradientAccumulator() A : Tuple = tf.Variable([4.0, 3.0] ) A , A : List[Any] = create_optimizer(5e-5 , 10 , 5 ) A : List[str] = tf.Variable([0.0, 0.0] , trainable=__lowerCamelCase ) def accumulate_on_replica(__lowerCamelCase : Tuple ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(__lowerCamelCase : Any , __lowerCamelCase : Optional[int] ): with strategy.scope(): A : int = strategy.experimental_local_results(__lowerCamelCase ) local_variables[0].assign(__lowerCamelCase ) local_variables[1].assign(__lowerCamelCase ) strategy.run(__lowerCamelCase , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(__lowerCamelCase ) def _check_local_values(__lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ): A : Optional[int] = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , __lowerCamelCase , tol=1e-2 ) self.assertListAlmostEqual(values[1].value() , __lowerCamelCase , tol=1e-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
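What GradientAccumulator is being tested for can be hand-rolled in a few lines: sum per-step gradients into a buffer and apply them once. A sketch with plain TF ops, using the constants from the first test but a simple SGD optimizer for determinism (the test itself uses create_optimizer):

import tensorflow as tf

var = tf.Variable([4.0, 3.0])
opt = tf.keras.optimizers.SGD(learning_rate=0.1)

total = tf.zeros_like(var)  # gradient buffer
for grad in ([1.0, 2.0], [-2.0, 1.0], [-1.0, 2.0]):  # the three accumulated steps
    total += tf.constant(grad)  # accumulate instead of applying immediately
opt.apply_gradients([(total, var)])  # one optimizer step for three gradients
print(var.numpy())  # accumulated grad [-2. 5.] -> [4. 3.] - 0.1 * [-2. 5.] = [4.2 2.5]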
17
0
import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class lowerCamelCase_ ( _A ,unittest.TestCase ): '''simple docstring''' a__ = XLNetTokenizer a__ = XLNetTokenizerFast a__ = True a__ = True def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple: super().setUp() # We have a SentencePiece fixture for testing A : Dict = XLNetTokenizer(__lowerCamelCase , keep_accents=__lowerCamelCase ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict: A : List[Any] = "<s>" A : List[str] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Union[str, Any]: A : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "<eod>" ) self.assertEqual(len(__lowerCamelCase ) , 10_06 ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> str: self.assertEqual(self.get_tokenizer().vocab_size , 10_00 ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: A : List[str] = XLNetTokenizer(__lowerCamelCase , keep_accents=__lowerCamelCase ) A : Optional[Any] = tokenizer.tokenize("This is a test" ) self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [2_85, 46, 10, 1_70, 3_82] ) A : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( __lowerCamelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) A : Tuple = tokenizer.convert_tokens_to_ids(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] ) A : Dict = tokenizer.convert_ids_to_tokens(__lowerCamelCase ) self.assertListEqual( __lowerCamelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Tuple: A : Dict = XLNetTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase ) A : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( __lowerCamelCase , [ SPIECE_UNDERLINE + "", "i", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "se", ".", ] , ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["▁he", "ll", "o"] ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: A : int = XLNetTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase ) A : List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( __lowerCamelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "se", ".", ] , ) @slow def SCREAMING_SNAKE_CASE__ ( self : int ) -> str: A : List[Any] = XLNetTokenizer.from_pretrained("xlnet-base-cased" ) A : Tuple = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase ) A : List[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase ) A : Any = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase ) A : List[Any] = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]: # fmt: off A : str = {"input_ids": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowerCamelCase , model_name="xlnet-base-cased" , revision="c841166438c31ec7ca9a106dee7bb312b73ae511" , )
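The build_inputs_with_special_tokens assertions above encode XLNet's convention of appending its special tokens at the end of the sequence. Written out directly, with sep = 4 and cls = 3 as in the test:

SEP, CLS = 4, 3  # the ids asserted in the test

def xlnet_inputs(ids_a, ids_b=None):
    if ids_b is None:
        return ids_a + [SEP, CLS]
    return ids_a + [SEP] + ids_b + [SEP, CLS]

print(xlnet_inputs([10, 11]))        # [10, 11, 4, 3]
print(xlnet_inputs([10, 11], [20]))  # [10, 11, 4, 20, 4, 3]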
702
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __SCREAMING_SNAKE_CASE = { """configuration_altclip""": [ """ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AltCLIPConfig""", """AltCLIPTextConfig""", """AltCLIPVisionConfig""", ], """processing_altclip""": ["""AltCLIPProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """AltCLIPPreTrainedModel""", """AltCLIPModel""", """AltCLIPTextModel""", """AltCLIPVisionModel""", ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys __SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
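The TYPE_CHECKING guard at the top of these init modules makes imports visible to static checkers without any runtime cost; a minimal standalone sketch:

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from decimal import Decimal  # only type checkers follow this import

def as_float(value: "Decimal") -> float:  # string annotation, so no runtime import needed
    return float(value)

print(as_float.__annotations__)  # {'value': 'Decimal', 'return': <class 'float'>}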
17
0
from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = { """funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/config.json""", """funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json""", """funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/config.json""", """funnel-transformer/medium-base""": """https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json""", """funnel-transformer/intermediate""": ( """https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json""" ), """funnel-transformer/intermediate-base""": ( """https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json""" ), """funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/config.json""", """funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json""", """funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json""", """funnel-transformer/xlarge-base""": """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json""", } class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = "funnel" a__ = { "hidden_size": "d_model", "num_attention_heads": "n_head", } def __init__( self : int , __lowerCamelCase : Any=3_05_22 , __lowerCamelCase : int=[4, 4, 4] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Any=2 , __lowerCamelCase : int=7_68 , __lowerCamelCase : Optional[Any]=12 , __lowerCamelCase : Optional[int]=64 , __lowerCamelCase : Any=30_72 , __lowerCamelCase : Tuple="gelu_new" , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : str=0.0 , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[Any]=1e-9 , __lowerCamelCase : Any="mean" , __lowerCamelCase : Optional[Any]="relative_shift" , __lowerCamelCase : Any=True , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[int]=True , **__lowerCamelCase : Optional[int] , ) -> Any: A : Dict = vocab_size A : Tuple = block_sizes A : Union[str, Any] = [1] * len(__lowerCamelCase ) if block_repeats is None else block_repeats assert len(__lowerCamelCase ) == len( self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length." 
A : int = num_decoder_layers A : Tuple = d_model A : int = n_head A : Any = d_head A : List[Any] = d_inner A : str = hidden_act A : Union[str, Any] = hidden_dropout A : int = attention_dropout A : Optional[int] = activation_dropout A : Tuple = initializer_range A : Optional[Any] = initializer_std A : Union[str, Any] = layer_norm_eps assert pooling_type in [ "mean", "max", ], F"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported.""" A : Tuple = pooling_type assert attention_type in [ "relative_shift", "factorized", ], F"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported.""" A : Union[str, Any] = attention_type A : Tuple = separate_cls A : List[str] = truncate_seq A : Dict = pool_q_only super().__init__(**__lowerCamelCase ) @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> str: return sum(self.block_sizes ) @num_hidden_layers.setter def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : int ) -> Dict: raise NotImplementedError( "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`." ) @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int: return len(self.block_sizes ) @num_blocks.setter def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : Tuple ) -> str: raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`." )
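The two derived properties at the end of the config reduce to simple arithmetic over block_sizes; shown standalone with the defaults used above:

block_sizes = [4, 4, 4]  # the default above

num_hidden_layers = sum(block_sizes)  # what the num_hidden_layers property returns
num_blocks = len(block_sizes)         # what the num_blocks property returns
print(num_hidden_layers, num_blocks)  # 12 3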
703
import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging __SCREAMING_SNAKE_CASE = ( """https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py""" ) __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name def UpperCAmelCase ( ): A : Union[str, Any] = "https://pypi.org/pypi/diffusers/json" A : List[Any] = json.loads(request.urlopen(_lowerCamelCase ).read() )["releases"].keys() return sorted(_lowerCamelCase , key=lambda _lowerCamelCase : version.Version(_lowerCamelCase ) ) def UpperCAmelCase ( ): # This function has already been executed if HF_MODULES_CACHE already is in the Python path. if HF_MODULES_CACHE in sys.path: return sys.path.append(_lowerCamelCase ) os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) A : List[Any] = Path(_lowerCamelCase ) / "__init__.py" if not init_path.exists(): init_path.touch() def UpperCAmelCase ( _lowerCamelCase ): init_hf_modules() A : Tuple = Path(_lowerCamelCase ) / name # If the parent module does not exist yet, recursively create it. if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) A : Optional[int] = dynamic_module_path / "__init__.py" if not init_path.exists(): init_path.touch() def UpperCAmelCase ( _lowerCamelCase ): with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f: A : Union[str, Any] = f.read() # Imports of the form `import .xxx` A : Union[str, Any] = re.findall("^\s*import\s+\.(\S+)\s*$" , _lowerCamelCase , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import" , _lowerCamelCase , flags=re.MULTILINE ) # Unique-ify return list(set(_lowerCamelCase ) ) def UpperCAmelCase ( _lowerCamelCase ): A : Optional[int] = False A : Tuple = [module_file] A : Optional[int] = [] # Let's recurse through all relative imports while not no_change: A : Optional[Any] = [] for f in files_to_check: new_imports.extend(get_relative_imports(_lowerCamelCase ) ) A : Optional[Any] = Path(_lowerCamelCase ).parent A : List[str] = [str(module_path / m ) for m in new_imports] A : Optional[Any] = [f for f in new_import_files if f not in all_relative_imports] A : Union[str, Any] = [f"""{f}.py""" for f in new_import_files] A : Tuple = len(_lowerCamelCase ) == 0 all_relative_imports.extend(_lowerCamelCase ) return all_relative_imports def UpperCAmelCase ( _lowerCamelCase ): with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f: A : Dict = f.read() # Imports of the form `import xxx` A : List[str] = re.findall("^\s*import\s+(\S+)\s*$" , _lowerCamelCase , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall("^\s*from\s+(\S+)\s+import" , _lowerCamelCase , flags=re.MULTILINE ) # Only keep the top-level module A : Optional[int] = [imp.split("." )[0] for imp in imports if not imp.startswith("." 
)] # Unique-ify and test we got them all A : Any = list(set(_lowerCamelCase ) ) A : Tuple = [] for imp in imports: try: importlib.import_module(_lowerCamelCase ) except ImportError: missing_packages.append(_lowerCamelCase ) if len(_lowerCamelCase ) > 0: raise ImportError( "This modeling file requires the following packages that were not found in your environment: " f"""{", ".join(_lowerCamelCase )}. Run `pip install {" ".join(_lowerCamelCase )}`""" ) return get_relative_imports(_lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : int = module_path.replace(os.path.sep , "." ) A : Optional[Any] = importlib.import_module(_lowerCamelCase ) if class_name is None: return find_pipeline_class(_lowerCamelCase ) return getattr(_lowerCamelCase , _lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase ): from ..pipelines import DiffusionPipeline A : int = dict(inspect.getmembers(_lowerCamelCase , inspect.isclass ) ) A : Union[str, Any] = None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , _lowerCamelCase ) and cls.__module__.split("." )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:""" f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in""" f""" {loaded_module}.""" ) A : Any = cls return pipeline_class def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , ): A : List[Any] = str(_lowerCamelCase ) A : Any = os.path.join(_lowerCamelCase , _lowerCamelCase ) if os.path.isfile(_lowerCamelCase ): A : Union[str, Any] = module_file_or_url A : Any = "local" elif pretrained_model_name_or_path.count("/" ) == 0: A : Optional[Any] = get_diffusers_versions() # cut ".dev0" A : Union[str, Any] = "v" + ".".join(__version__.split("." )[:3] ) # retrieve github version that matches if revision is None: A : List[Any] = latest_version if latest_version[1:] in available_versions else "main" logger.info(f"""Defaulting to latest_version: {revision}.""" ) elif revision in available_versions: A : Optional[Any] = f"""v{revision}""" elif revision == "main": A : Dict = revision else: raise ValueError( f"""`custom_revision`: {revision} does not exist. 
Please make sure to choose one of""" f""" {", ".join(available_versions + ["main"] )}.""" ) # community pipeline on GitHub A : Dict = COMMUNITY_PIPELINES_URL.format(revision=_lowerCamelCase , pipeline=_lowerCamelCase ) try: A : Optional[int] = cached_download( _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , ) A : Optional[Any] = "git" A : Any = pretrained_model_name_or_path + ".py" except EnvironmentError: logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" ) raise else: try: # Load from URL or cache if already cached A : Any = hf_hub_download( _lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , ) A : Optional[Any] = os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) ) except EnvironmentError: logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" ) raise # Check we have all the requirements in our environment A : List[str] = check_imports(_lowerCamelCase ) # Now we move the module inside our cached dynamic modules. A : List[str] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(_lowerCamelCase ) A : Optional[int] = Path(_lowerCamelCase ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(_lowerCamelCase , submodule_path / module_file ) for module_needed in modules_needed: A : int = f"""{module_needed}.py""" shutil.copy(os.path.join(_lowerCamelCase , _lowerCamelCase ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(_lowerCamelCase , _lowerCamelCase ): A : Optional[Any] = use_auth_token elif use_auth_token is True: A : Dict = HfFolder.get_token() else: A : Tuple = None A : List[str] = model_info(_lowerCamelCase , revision=_lowerCamelCase , token=_lowerCamelCase ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. 
A : str = submodule_path / commit_hash A : List[str] = full_submodule + os.path.sep + commit_hash create_dynamic_module(_lowerCamelCase ) if not (submodule_path / module_file).exists(): shutil.copy(_lowerCamelCase , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( _lowerCamelCase , f"""{module_needed}.py""" , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , ) return os.path.join(_lowerCamelCase , _lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , **_lowerCamelCase , ): A : int = get_cached_module_file( _lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , ) return get_class_in_module(_lowerCamelCase , final_module.replace(".py" , "" ) )
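The two regular expressions in the relative-import scanner are easiest to read against a concrete snippet; the module names below are invented for the demo:

import re

source = "import .schedulers\nfrom .pipeline_utils import DiffusionPipeline\n"
relative = re.findall(r"^\s*import\s+\.(\S+)\s*$", source, flags=re.MULTILINE)
relative += re.findall(r"^\s*from\s+\.(\S+)\s+import", source, flags=re.MULTILINE)
print(sorted(set(relative)))  # ['pipeline_utils', 'schedulers']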
17
0
import json import os import unittest from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCamelCase_ ( _A ,unittest.TestCase ): '''simple docstring''' a__ = XLMTokenizer a__ = False def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A : Tuple = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] A : List[str] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) A : int = ["l o 123", "lo w 1456", "e r</w> 1789", ""] A : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) A : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" ) as fp: fp.write(json.dumps(__lowerCamelCase ) ) with open(self.merges_file , "w" ) as fp: fp.write("\n".join(__lowerCamelCase ) ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Tuple ) -> Optional[Any]: A : int = "lower newer" A : Optional[Any] = "lower newer" return input_text, output_text def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]: A : int = XLMTokenizer(self.vocab_file , self.merges_file ) A : Dict = "lower" A : Dict = ["low", "er</w>"] A : Optional[Any] = tokenizer.tokenize(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) A : Tuple = tokens + ["<unk>"] A : List[str] = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[str]: A : Union[str, Any] = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" ) A : Dict = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase ) A : int = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase ) A : int = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase ) A : int = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase ) assert encoded_sentence == [0] + text + [1] assert encoded_pair == [0] + text + [1] + text_a + [1]
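The fixture above ("l o", "lo w", "e r</w>" merges) tokenizes "lower" into ["low", "er</w>"]. A toy merge loop that applies the merges in listed order (a simplification of real BPE, which picks merges by rank) reproduces that:

merges = [("l", "o"), ("lo", "w"), ("e", "r</w>")]  # mirrors the fixture's merges file

def toy_bpe(word):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]  # mark the word boundary
    for a, b in merges:
        i = 0
        while i < len(symbols) - 1:
            if (symbols[i], symbols[i + 1]) == (a, b):
                symbols[i : i + 2] = [a + b]  # merge the adjacent pair in place
            else:
                i += 1
    return symbols

print(toy_bpe("lower"))  # ['low', 'er</w>']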
704
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed __SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""): from run_translation import main # noqa set_seed(42) __SCREAMING_SNAKE_CASE = """sshleifer/student_marian_en_ro_6_1""" __SCREAMING_SNAKE_CASE = """sshleifer/tiny-mbart""" @require_torch class lowerCamelCase_ ( _A ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : str=None , __lowerCamelCase : List[str]=True , __lowerCamelCase : str=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Optional[int]=True , ) -> Dict: A : str = self.run_trainer( eval_steps=1 , max_len=12 , model_name=__lowerCamelCase , num_train_epochs=1 , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , predict_with_generate=__lowerCamelCase , do_train=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , ) A : Dict = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history if not do_eval: return A : List[Any] = [log for log in logs if "eval_loss" in log.keys()] A : Any = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats A : List[str] = eval_metrics[-1] assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase ) assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: self.run_seqaseq_quick() @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: self.run_seqaseq_quick(distributed=__lowerCamelCase ) @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str: self.run_seqaseq_quick(distributed=__lowerCamelCase ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]: self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple" ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> str: self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple --fp16" ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=__lowerCamelCase ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: self.run_seqaseq_quick( distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , 
predict_with_generate=__lowerCamelCase ) @require_apex @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict: # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" ) @parameterized.expand(["base", "low", "high", "mixed"] ) @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : List[str] ) -> Tuple: # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout A : Dict = { # test with the default log_level - should be info and thus log info once "base": {"extra_args_str": "", "n_matches": 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1}, # test with high log_level and log_level_replica - should be quiet on all processes "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0}, } A : List[str] = experiments[experiment_id] A : Union[str, Any] = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False} A : Union[str, Any] = "Running training" with CaptureStderr() as cl: self.run_seqaseq_quick(**__lowerCamelCase , extra_args_str=data["extra_args_str"] ) A : Dict = len(re.findall(__lowerCamelCase , cl.err ) ) self.assertEqual(__lowerCamelCase , data["n_matches"] ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: A : int = self.run_trainer( eval_steps=2 , max_len=1_28 , model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=10 , distributed=__lowerCamelCase , ) # Check metrics A : str = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history A : Dict = [log for log in logs if "eval_loss" in log.keys()] A : Dict = eval_metrics[0] A : int = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase ) # test if do_predict saves generations and metrics A : Optional[Any] = os.listdir(__lowerCamelCase ) A : Any = {os.path.basename(__lowerCamelCase ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]: from transformers.training_args import OptimizerNames def train_and_return_metrics(__lowerCamelCase : str ) -> Tuple[int, float]: A : Optional[int] = "--skip_memory_metrics 0" A : str = self.run_trainer( max_len=1_28 , 
model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=1 , optim=__lowerCamelCase , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , n_gpus_to_use=1 , ) # Check metrics A : Union[str, Any] = TrainerState.load_from_json(Path(__lowerCamelCase , "trainer_state.json" ) ).log_history A : str = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 ) A : List[Any] = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 ) A : int = logs[0]["train_loss"] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss A , A , A : int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) A , A , A : Optional[int] = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) A : Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb A : Dict = gpu_peak_mem_orig + gpu_alloc_mem_orig A : Dict = gpu_peak_mem_bnb + gpu_alloc_mem_bnb A : int = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings A : Tuple = 1_20 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( __lowerCamelCase , __lowerCamelCase , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got" F""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and""" F""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , ) self.assertGreater( __lowerCamelCase , __lowerCamelCase , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got" F""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and""" F""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , ) self.assertEqual( __lowerCamelCase , __lowerCamelCase , F"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" ) def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : float = 3e-3 , __lowerCamelCase : str = "adafactor" , __lowerCamelCase : bool = False , __lowerCamelCase : str = None , __lowerCamelCase : int = 0 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : int = None , ) -> List[str]: A : Optional[int] = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro" A : Optional[int] = self.get_auto_remove_tmp_dir() A : int = F""" --model_name_or_path {model_name} --train_file {data_dir}/train.json 
--validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(__lowerCamelCase )} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(__lowerCamelCase )} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX """.split() A : Optional[Any] = F""" --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(__lowerCamelCase )} """.split() A : Optional[Any] = "\n --do_predict\n ".split() A : Optional[int] = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F"""--optim {optim}""".split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: A : Dict = get_gpu_count() A : Any = get_torch_dist_unique_port() A : Optional[Any] = F""" -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py """.split() A : Any = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__lowerCamelCase , env=self.get_env() ) else: A : List[Any] = ["run_translation.py"] + args with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ): main() return output_dir
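The ~150MB expectation in the bnb test is back-of-envelope arithmetic over the 25M quantizable parameters (8 bytes of fp32 Adam state per parameter vs 2 bytes under 8-bit bnb); as a quick check:

params = 25_000_000
full_mb = params * 8 / 2**20  # two fp32 Adam moments -> 8 bytes per parameter
bnb_mb = params * 2 / 2**20   # 8-bit optimizer state -> 2 bytes per parameter
print(round(full_mb), round(bnb_mb), round(full_mb - bnb_mb))  # 191 48 143, within the 120MB margin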
17
0
import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): A : Any = os.path.abspath(_lowerCamelCase ) logger.info(f"""Converting TensorFlow checkpoint from {tf_path}""" ) # Load weights from TF model A : Any = tf.train.list_variables(_lowerCamelCase ) A : Optional[int] = [] A : Tuple = [] A : List[Any] = [] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") A : Tuple = full_name.split("/" ) if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(f"""Skipping non-model layer {full_name}""" ) continue if "optimizer" in full_name: logger.info(f"""Skipping optimization layer {full_name}""" ) continue if name[0] == "model": # ignore initial 'model' A : Tuple = name[1:] # figure out how many levels deep the name is A : Tuple = 0 for _name in name: if _name.startswith("layer_with_weights" ): depth += 1 else: break layer_depth.append(_lowerCamelCase ) # read data A : Union[str, Any] = tf.train.load_variable(_lowerCamelCase , _lowerCamelCase ) names.append("/".join(_lowerCamelCase ) ) arrays.append(_lowerCamelCase ) logger.info(f"""Read a total of {len(_lowerCamelCase ):,} layers""" ) # Sanity check if len(set(_lowerCamelCase ) ) != 1: raise ValueError(f"""Found layer names with different depths (layer depth {list(set(_lowerCamelCase ) )})""" ) A : Tuple = list(set(_lowerCamelCase ) )[0] if layer_depth != 1: raise ValueError( "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP" " heads." ) # convert layers logger.info("Converting weights..." 
) for full_name, array in zip(_lowerCamelCase , _lowerCamelCase ): A : Dict = full_name.split("/" ) A : Any = model A : List[str] = [] for i, m_name in enumerate(_lowerCamelCase ): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith("layer_with_weights" ): A : Optional[int] = int(m_name.split("-" )[-1] ) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(["embeddings", "LayerNorm"] ) A : Dict = getattr(_lowerCamelCase , "embeddings" ) A : List[str] = getattr(_lowerCamelCase , "LayerNorm" ) elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(["encoder", "layer", str(layer_num - 4 )] ) A : Any = getattr(_lowerCamelCase , "encoder" ) A : str = getattr(_lowerCamelCase , "layer" ) A : Union[str, Any] = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(["pooler", "dense"] ) A : Optional[Any] = getattr(_lowerCamelCase , "pooler" ) A : Optional[Any] = getattr(_lowerCamelCase , "dense" ) elif m_name == "embeddings": trace.append("embeddings" ) A : Dict = getattr(_lowerCamelCase , "embeddings" ) if layer_num == 0: trace.append("word_embeddings" ) A : Dict = getattr(_lowerCamelCase , "word_embeddings" ) elif layer_num == 1: trace.append("position_embeddings" ) A : str = getattr(_lowerCamelCase , "position_embeddings" ) elif layer_num == 2: trace.append("token_type_embeddings" ) A : Optional[int] = getattr(_lowerCamelCase , "token_type_embeddings" ) else: raise ValueError(f"""Unknown embedding layer with name {full_name}""" ) trace.append("weight" ) A : Tuple = getattr(_lowerCamelCase , "weight" ) elif m_name == "_attention_layer": # self-attention layer trace.extend(["attention", "self"] ) A : int = getattr(_lowerCamelCase , "attention" ) A : Union[str, Any] = getattr(_lowerCamelCase , "self" ) elif m_name == "_attention_layer_norm": # output attention norm trace.extend(["attention", "output", "LayerNorm"] ) A : Union[str, Any] = getattr(_lowerCamelCase , "attention" ) A : Union[str, Any] = getattr(_lowerCamelCase , "output" ) A : Any = getattr(_lowerCamelCase , "LayerNorm" ) elif m_name == "_attention_output_dense": # output attention dense trace.extend(["attention", "output", "dense"] ) A : List[Any] = getattr(_lowerCamelCase , "attention" ) A : Dict = getattr(_lowerCamelCase , "output" ) A : List[str] = getattr(_lowerCamelCase , "dense" ) elif m_name == "_output_dense": # output dense trace.extend(["output", "dense"] ) A : int = getattr(_lowerCamelCase , "output" ) A : Dict = getattr(_lowerCamelCase , "dense" ) elif m_name == "_output_layer_norm": # output dense trace.extend(["output", "LayerNorm"] ) A : Optional[int] = getattr(_lowerCamelCase , "output" ) A : Optional[int] = getattr(_lowerCamelCase , "LayerNorm" ) elif m_name == "_key_dense": # attention key trace.append("key" ) A : Optional[Any] = getattr(_lowerCamelCase , "key" ) elif m_name == "_query_dense": # attention query trace.append("query" ) A : List[Any] = getattr(_lowerCamelCase , "query" ) elif m_name == "_value_dense": # attention value trace.append("value" ) A : Optional[Any] = getattr(_lowerCamelCase , "value" ) elif m_name == "_intermediate_dense": # attention intermediate dense trace.extend(["intermediate", "dense"] ) A : Any = getattr(_lowerCamelCase , "intermediate" ) A : Tuple = getattr(_lowerCamelCase , "dense" ) elif m_name 
== "_output_layer_norm": # output layer norm trace.append("output" ) A : List[Any] = getattr(_lowerCamelCase , "output" ) # weights & biases elif m_name in ["bias", "beta"]: trace.append("bias" ) A : Optional[int] = getattr(_lowerCamelCase , "bias" ) elif m_name in ["kernel", "gamma"]: trace.append("weight" ) A : str = getattr(_lowerCamelCase , "weight" ) else: logger.warning(f"""Ignored {m_name}""" ) # for certain layers reshape is necessary A : List[Any] = ".".join(_lowerCamelCase ) if re.match(R"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)" , _lowerCamelCase ) or re.match( R"(\S+)\.attention\.output\.dense\.weight" , _lowerCamelCase ): A : Any = array.reshape(pointer.data.shape ) if "kernel" in full_name: A : Tuple = array.transpose() if pointer.shape == array.shape: A : Union[str, Any] = torch.from_numpy(_lowerCamelCase ) else: raise ValueError( f"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:""" f""" {array.shape}""" ) logger.info(f"""Successfully set variable {full_name} to PyTorch layer {trace}""" ) return model def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): # Instantiate model logger.info(f"""Loading model based on config from {config_path}...""" ) A : List[str] = BertConfig.from_json_file(_lowerCamelCase ) A : Optional[Any] = BertModel(_lowerCamelCase ) # Load weights from checkpoint logger.info(f"""Loading weights from checkpoint {tf_checkpoint_path}...""" ) load_tfa_weights_in_bert(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Save pytorch-model logger.info(f"""Saving PyTorch model to {pytorch_dump_path}...""" ) torch.save(model.state_dict() , _lowerCamelCase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument( """--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path.""" ) parser.add_argument( """--bert_config_file""", type=str, required=True, help="""The config json file corresponding to the BERT model. This specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", type=str, required=True, help="""Path to the output PyTorch model (must include filename).""", ) __SCREAMING_SNAKE_CASE = parser.parse_args() convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
705
from collections.abc import Sequence def UpperCAmelCase ( _lowerCamelCase = None ): if nums is None or not nums: raise ValueError("Input sequence should not be empty" ) A : Dict = nums[0] for i in range(1 , len(_lowerCamelCase ) ): A : Tuple = nums[i] A : List[Any] = max(_lowerCamelCase , ans + num , _lowerCamelCase ) return ans if __name__ == "__main__": import doctest doctest.testmod() # Try on a sample input from the user __SCREAMING_SNAKE_CASE = int(input("""Enter number of elements : """).strip()) __SCREAMING_SNAKE_CASE = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n] print(max_subsequence_sum(array))
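# Hedged sanity check of the recurrence above, using the name referenced in
# the __main__ block: since a subsequence need not be contiguous, the best
# sum either keeps the running answer, extends it with the next number, or
# restarts there, so a mixed-sign input reduces to the sum of its positives.
assert max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 12  # 1 + 4 + 2 + 1 + 4
assert max_subsequence_sum([-3, -1, -2]) == -1  # all negative: best single element
assert max_subsequence_sum([5]) == 5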
17
0
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = { """Intel/dpt-large""": """https://huggingface.co/Intel/dpt-large/resolve/main/config.json""", # See all DPT models at https://huggingface.co/models?filter=dpt } class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = "dpt" def __init__( self : Optional[int] , __lowerCamelCase : List[Any]=7_68 , __lowerCamelCase : Tuple=12 , __lowerCamelCase : List[str]=12 , __lowerCamelCase : Optional[Any]=30_72 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : Tuple=0.0 , __lowerCamelCase : Optional[int]=0.0 , __lowerCamelCase : Optional[int]=0.02 , __lowerCamelCase : Optional[int]=1e-12 , __lowerCamelCase : List[Any]=3_84 , __lowerCamelCase : Optional[int]=16 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : Tuple=False , __lowerCamelCase : int=True , __lowerCamelCase : Tuple=[2, 5, 8, 11] , __lowerCamelCase : Tuple="project" , __lowerCamelCase : List[str]=[4, 2, 1, 0.5] , __lowerCamelCase : Tuple=[96, 1_92, 3_84, 7_68] , __lowerCamelCase : Optional[Any]=2_56 , __lowerCamelCase : str=-1 , __lowerCamelCase : Dict=False , __lowerCamelCase : List[str]=True , __lowerCamelCase : Union[str, Any]=0.4 , __lowerCamelCase : Any=2_55 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=[1, 10_24, 24, 24] , __lowerCamelCase : Optional[int]=[0, 1] , __lowerCamelCase : List[str]=None , **__lowerCamelCase : Dict , ) -> Optional[int]: super().__init__(**__lowerCamelCase ) A : str = hidden_size A : int = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info("Initializing the config with a `BiT` backbone." ) A : str = { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, } A : Optional[int] = BitConfig(**__lowerCamelCase ) elif isinstance(__lowerCamelCase , __lowerCamelCase ): logger.info("Initializing the config with a `BiT` backbone." ) A : Union[str, Any] = BitConfig(**__lowerCamelCase ) elif isinstance(__lowerCamelCase , __lowerCamelCase ): A : Union[str, Any] = backbone_config else: raise ValueError( F"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" ) A : int = backbone_featmap_shape A : Tuple = neck_ignore_stages if readout_type != "project": raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode." 
) else: A : int = None A : List[str] = None A : Any = [] A : int = num_hidden_layers A : str = num_attention_heads A : Union[str, Any] = intermediate_size A : List[Any] = hidden_act A : str = hidden_dropout_prob A : Dict = attention_probs_dropout_prob A : str = initializer_range A : List[Any] = layer_norm_eps A : Union[str, Any] = image_size A : Tuple = patch_size A : str = num_channels A : Dict = qkv_bias A : Union[str, Any] = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']" ) A : List[Any] = readout_type A : List[Any] = reassemble_factors A : Union[str, Any] = neck_hidden_sizes A : Tuple = fusion_hidden_size A : Union[str, Any] = head_in_index A : Dict = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) A : List[Any] = use_auxiliary_head A : Optional[Any] = auxiliary_loss_weight A : Optional[Any] = semantic_loss_ignore_index A : Any = semantic_classifier_dropout def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]: A : Optional[int] = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: A : Tuple = self.backbone_config.to_dict() A : Tuple = self.__class__.model_type return output
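# Hedged usage sketch (assumes the Hugging Face `transformers` package, where
# the class above ships as DPTConfig): default construction plus the
# to_dict() round-trip implemented in the last method above.
from transformers import DPTConfig

config = DPTConfig()  # non-hybrid ViT-style defaults
config_dict = config.to_dict()
assert config_dict["model_type"] == "dpt"
assert config_dict["readout_type"] == "project"  # the default from the signature above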
706
from math import sqrt def UpperCAmelCase ( _lowerCamelCase = 100_0000 ): A : int = 0 A : int = 0 A : int while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(_lowerCamelCase , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(F"""{solution() = }""")
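# Hedged restatement of the geometry behind the loop above: for a cuboid with
# sides a <= b <= c, the shortest surface path between opposite corners
# unfolds to sqrt((a + b)**2 + c**2), so the solver iterates over
# sum_shortest_sides = a + b for each candidate longest side c.
from math import sqrt

a, b, c = 3, 5, 6  # the classic Project Euler 86 example: path length exactly 10
assert sqrt((a + b) ** 2 + c ** 2) == 10.0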
17
0
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def UpperCAmelCase ( _lowerCamelCase ): A : List[Any] = 384 if "tiny" in model_name: A : List[str] = [3, 3, 9, 3] A : List[str] = [96, 192, 384, 768] if "small" in model_name: A : Optional[Any] = [3, 3, 27, 3] A : str = [96, 192, 384, 768] if "base" in model_name: A : str = [3, 3, 27, 3] A : Tuple = [128, 256, 512, 1024] A : str = 512 if "large" in model_name: A : Optional[int] = [3, 3, 27, 3] A : int = [192, 384, 768, 1536] A : str = 768 if "xlarge" in model_name: A : Dict = [3, 3, 27, 3] A : Optional[Any] = [256, 512, 1024, 2048] A : Tuple = 1024 # set label information A : List[str] = 150 A : Dict = "huggingface/label-files" A : Union[str, Any] = "ade20k-id2label.json" A : List[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) ) A : List[Any] = {int(_lowerCamelCase ): v for k, v in idalabel.items()} A : int = {v: k for k, v in idalabel.items()} A : Optional[int] = ConvNextConfig( depths=_lowerCamelCase , hidden_sizes=_lowerCamelCase , out_features=["stage1", "stage2", "stage3", "stage4"] ) A : Optional[Any] = UperNetConfig( backbone_config=_lowerCamelCase , auxiliary_in_channels=_lowerCamelCase , num_labels=_lowerCamelCase , idalabel=_lowerCamelCase , labelaid=_lowerCamelCase , ) return config def UpperCAmelCase ( _lowerCamelCase ): A : str = [] # fmt: off # stem rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") ) rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") ) rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") ) rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f"""backbone.stages.{i}.{j}.gamma""", f"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.depthwise_conv.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.depthwise_conv.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.norm.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.norm.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") ) if i > 0: rename_keys.append((f"""backbone.downsample_layers.{i}.0.weight""", f"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") ) rename_keys.append((f"""backbone.downsample_layers.{i}.0.bias""", 
f"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") ) rename_keys.append((f"""backbone.downsample_layers.{i}.1.weight""", f"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") ) rename_keys.append((f"""backbone.downsample_layers.{i}.1.bias""", f"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") ) rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") ) rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") ) # decode head rename_keys.extend( [ ("decode_head.conv_seg.weight", "decode_head.classifier.weight"), ("decode_head.conv_seg.bias", "decode_head.classifier.bias"), ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"), ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"), ] ) # fmt: on return rename_keys def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): A : Union[str, Any] = dct.pop(_lowerCamelCase ) A : List[str] = val def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): A : Union[str, Any] = { "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth", "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth", "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth", "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth", "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth", } A : Optional[Any] = model_name_to_url[model_name] A : Dict = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu" )["state_dict"] A : Optional[Any] = get_upernet_config(_lowerCamelCase ) A : Dict = UperNetForSemanticSegmentation(_lowerCamelCase ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): A : List[str] = state_dict.pop(_lowerCamelCase ) if "bn" in key: A : Tuple = key.replace("bn" , "batch_norm" ) A : Optional[int] = val # rename keys A : str = create_rename_keys(_lowerCamelCase ) for src, dest in rename_keys: rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) model.load_state_dict(_lowerCamelCase ) # verify on image A : Any = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg" A : List[str] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert("RGB" ) A : List[Any] = SegformerImageProcessor() A : Optional[int] = processor(_lowerCamelCase , return_tensors="pt" ).pixel_values with torch.no_grad(): A : int = model(_lowerCamelCase ) if model_name == "upernet-convnext-tiny": A : Union[str, Any] = torch.tensor( [[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] ) elif model_name == "upernet-convnext-small": A : Optional[int] = torch.tensor( 
[[-8.82_36, -8.82_36, -8.67_71], [-8.82_36, -8.82_36, -8.67_71], [-8.76_38, -8.76_38, -8.62_40]] ) elif model_name == "upernet-convnext-base": A : str = torch.tensor( [[-8.85_58, -8.85_58, -8.69_05], [-8.85_58, -8.85_58, -8.69_05], [-8.76_69, -8.76_69, -8.60_21]] ) elif model_name == "upernet-convnext-large": A : int = torch.tensor( [[-8.66_60, -8.66_60, -8.62_10], [-8.66_60, -8.66_60, -8.62_10], [-8.63_10, -8.63_10, -8.59_64]] ) elif model_name == "upernet-convnext-xlarge": A : Any = torch.tensor( [[-8.49_80, -8.49_80, -8.39_77], [-8.49_80, -8.49_80, -8.39_77], [-8.43_79, -8.43_79, -8.34_12]] ) print("Logits:" , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , _lowerCamelCase , atol=1e-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_lowerCamelCase ) print(f"""Saving processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(_lowerCamelCase ) if push_to_hub: print(f"""Pushing model and processor for {model_name} to hub""" ) model.push_to_hub(f"""openmmlab/{model_name}""" ) processor.push_to_hub(f"""openmmlab/{model_name}""" ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""upernet-convnext-tiny""", type=str, choices=[F"""upernet-convnext-{size}""" for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]], help="""Name of the ConvNext UperNet model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __SCREAMING_SNAKE_CASE = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
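# Hedged usage sketch: `model_name` must be one of the five checkpoints in
# the URL map above, mirrored by the argparse `choices`; calling the entry
# point (named convert_upernet_checkpoint in the __main__ block) downloads
# the mmsegmentation weights on demand.
valid_names = [f"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]]
assert "upernet-convnext-tiny" in valid_names
# convert_upernet_checkpoint("upernet-convnext-tiny", "./upernet-convnext-tiny", False)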
707
import os # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_doctest_list.py __SCREAMING_SNAKE_CASE = """.""" if __name__ == "__main__": __SCREAMING_SNAKE_CASE = os.path.join(REPO_PATH, """utils/documentation_tests.txt""") __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] with open(doctest_file_path) as fp: for line in fp: __SCREAMING_SNAKE_CASE = line.strip() __SCREAMING_SNAKE_CASE = os.path.join(REPO_PATH, line) if not (os.path.isfile(path) or os.path.isdir(path)): non_existent_paths.append(line) all_paths.append(path) if len(non_existent_paths) > 0: __SCREAMING_SNAKE_CASE = """\n""".join(non_existent_paths) raise ValueError(F"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""") if all_paths != sorted(all_paths): raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""")
17
0
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = {"""vocab_file""": """spiece.model"""} __SCREAMING_SNAKE_CASE = { """vocab_file""": { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""", } } __SCREAMING_SNAKE_CASE = { """xlnet-base-cased""": None, """xlnet-large-cased""": None, } # Segments (not really needed) __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 2 __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = 4 class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = "left" def __init__( self : List[str] , __lowerCamelCase : Any , __lowerCamelCase : str=False , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]=False , __lowerCamelCase : List[str]="<s>" , __lowerCamelCase : int="</s>" , __lowerCamelCase : str="<unk>" , __lowerCamelCase : int="<sep>" , __lowerCamelCase : Optional[Any]="<pad>" , __lowerCamelCase : Tuple="<cls>" , __lowerCamelCase : Optional[Any]="<mask>" , __lowerCamelCase : int=["<eop>", "<eod>"] , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Optional[int] , ) -> None: # Mask token behave like a normal word, i.e. include the space before it A : str = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token A : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__lowerCamelCase , remove_space=__lowerCamelCase , keep_accents=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , ) A : int = 3 A : Union[str, Any] = do_lower_case A : List[Any] = remove_space A : List[Any] = keep_accents A : str = vocab_file A : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowerCamelCase ) @property def SCREAMING_SNAKE_CASE__ ( self : str ) -> int: return len(self.sp_model ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: A : str = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Tuple ) -> Optional[Any]: A : Optional[int] = self.__dict__.copy() A : Dict = None return state def __setstate__( self : Any , __lowerCamelCase : Any ) -> Dict: A : Union[str, Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): A : Optional[int] = {} A : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : int ) -> Any: if self.remove_space: A : Union[str, Any] = " ".join(inputs.strip().split() ) else: A : str = inputs A : Dict = outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: 
A : Optional[int] = unicodedata.normalize("NFKD" , __lowerCamelCase ) A : Tuple = "".join([c for c in outputs if not unicodedata.combining(__lowerCamelCase )] ) if self.do_lower_case: A : Tuple = outputs.lower() return outputs def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : str ) -> List[str]: A : Dict = self.preprocess_text(__lowerCamelCase ) A : Tuple = self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase ) A : Any = [] for piece in pieces: if len(__lowerCamelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): A : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowerCamelCase , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: A : Optional[Any] = cur_pieces[1:] else: A : List[str] = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__lowerCamelCase ) else: new_pieces.append(__lowerCamelCase ) return new_pieces def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : Tuple ) -> Dict: return self.sp_model.PieceToId(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Optional[int] ) -> List[str]: return self.sp_model.IdToPiece(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Tuple ) -> Tuple: A : Any = "".join(__lowerCamelCase ).replace(__lowerCamelCase , " " ).strip() return out_string def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : List[int] , __lowerCamelCase : bool = False , __lowerCamelCase : bool = None , __lowerCamelCase : bool = True , **__lowerCamelCase : Union[str, Any] , ) -> str: A : Any = kwargs.pop("use_source_tokenizer" , __lowerCamelCase ) A : Any = self.convert_ids_to_tokens(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 A : Dict = [] A : Dict = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__lowerCamelCase ) ) A : Optional[Any] = [] sub_texts.append(__lowerCamelCase ) else: current_sub_text.append(__lowerCamelCase ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__lowerCamelCase ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens A : Any = "".join(__lowerCamelCase ) A : Optional[int] = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: A : Dict = self.clean_up_tokenization(__lowerCamelCase ) return clean_text else: return text def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: A : Dict = [self.sep_token_id] A : Dict = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is not None: return ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1, 1] return ([0] * len(__lowerCamelCase )) + [1, 1] def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: A : Optional[Any] = [self.sep_token_id] A : Dict = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCamelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return A : str = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCamelCase , "wb" ) as fi: A : Tuple = self.sp_model.serialized_model_proto() fi.write(__lowerCamelCase ) return (out_vocab_file,)
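# Hedged, self-contained illustration of the special-token layout the two
# methods above implement: XLNet appends <sep> and <cls> at the END of the
# sequence, and the <cls> position gets token type id 2 (CLS_SEGMENT_ID).
SEP, CLS = "<sep>", "<cls>"

def xlnet_layout(tokens_a, tokens_b=None):
    if tokens_b is None:
        return tokens_a + [SEP, CLS]
    return tokens_a + [SEP] + tokens_b + [SEP, CLS]

assert xlnet_layout(["hello", "world"]) == ["hello", "world", "<sep>", "<cls>"]
assert xlnet_layout(["a"], ["b"]) == ["a", "<sep>", "b", "<sep>", "<cls>"]
# matching token_type_ids for the pair case: [0, 0] + [1, 1] + [2]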
708
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class lowerCamelCase_ : '''simple docstring''' def __init__( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Tuple=13 , __lowerCamelCase : List[str]=30 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : Any=3 , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : int=32 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Any=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=2 , ) -> str: A : List[Any] = parent A : Optional[int] = batch_size A : Any = image_size A : Optional[Any] = patch_size A : Optional[Any] = num_channels A : Tuple = is_training A : Optional[Any] = use_labels A : Union[str, Any] = hidden_size A : Tuple = num_hidden_layers A : Union[str, Any] = num_attention_heads A : Union[str, Any] = intermediate_size A : Any = hidden_act A : Tuple = hidden_dropout_prob A : Dict = attention_probs_dropout_prob A : Any = type_sequence_label_size A : Tuple = initializer_range A : List[Any] = scope A : Optional[int] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) A : List[str] = (image_size // patch_size) ** 2 A : List[str] = num_patches + 2 def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int: A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : List[Any] = None if self.use_labels: A : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Dict = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str] ) -> int: A : Optional[int] 
= DeiTModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : List[Any] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Any ) -> Any: A : List[Any] = DeiTForMaskedImageModeling(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A : List[str] = 1 A : Optional[int] = DeiTForMaskedImageModeling(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : int ) -> Dict: A : str = self.type_sequence_label_size A : List[str] = DeiTForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A : Any = 1 A : str = DeiTForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: A : Dict = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ) : Tuple = config_and_inputs A : int = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) a__ = ( { "feature-extraction": DeiTModel, "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]: A : str = DeiTModelTester(self ) A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict: self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int: pass def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: A , A : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Dict = model_class(__lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]: A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Union[str, Any] 
= model_class(__lowerCamelCase ) A : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Any = [*signature.parameters.keys()] A : Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any: A : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any]=False ) -> str: A : Union[str, Any] = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: if not self.model_tester.is_training: return A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() A : Dict = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(__lowerCamelCase ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue A : Union[str, Any] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Dict = model(**__lowerCamelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return A : Tuple = False A : Any = True for model_class in self.all_model_classes: if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue A : List[str] = model_class(__lowerCamelCase ) model.gradient_checkpointing_enable() model.to(__lowerCamelCase ) model.train() A : Dict = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Tuple = model(**__lowerCamelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() A : int = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(__lowerCamelCase ), *get_values(__lowerCamelCase ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ): A : Tuple = problem_type["title"] A : Optional[Any] = 
problem_type["num_labels"] A : List[str] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if problem_type["num_labels"] > 1: A : List[str] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) A : int = inputs["labels"].to(problem_type["dtype"] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=__lowerCamelCase ) as warning_list: A : Optional[Any] = model(**__lowerCamelCase ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Optional[Any] = DeiTModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: A : Optional[int] = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to( __lowerCamelCase ) A : List[Any] = self.default_image_processor A : List[Any] = prepare_img() A : Optional[Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): A : List[str] = model(**__lowerCamelCase ) # verify the logits A : str = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : List[str] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: A : str = DeiTModel.from_pretrained( "facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" ) A : Dict = self.default_image_processor A : Optional[int] = prepare_img() A : Union[str, Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ) A : Union[str, Any] = inputs.pixel_values.to(__lowerCamelCase ) # forward pass to make sure inference works in fp16 with torch.no_grad(): A : List[str] = model(__lowerCamelCase )
17
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __SCREAMING_SNAKE_CASE = {"""configuration_opt""": ["""OPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OPTConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """OPT_PRETRAINED_MODEL_ARCHIVE_LIST""", """OPTForCausalLM""", """OPTModel""", """OPTPreTrainedModel""", """OPTForSequenceClassification""", """OPTForQuestionAnswering""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = ["""TFOPTForCausalLM""", """TFOPTModel""", """TFOPTPreTrainedModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """FlaxOPTForCausalLM""", """FlaxOPTModel""", """FlaxOPTPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys __SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
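# Hedged sketch of what the _LazyModule wiring above buys (assumes the
# `transformers` package is installed): importing the package is cheap, and
# the real submodule is only loaded on first attribute access, so a missing
# backend (torch/tf/flax) errors only when its class is actually requested.
import importlib

opt = importlib.import_module("transformers.models.opt")
config_cls = getattr(opt, "OPTConfig")  # this access triggers the real import
assert config_cls.__name__ == "OPTConfig"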
709
from sklearn.metrics import recall_score import datasets __SCREAMING_SNAKE_CASE = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ __SCREAMING_SNAKE_CASE = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. 
>>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ __SCREAMING_SNAKE_CASE = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("int32" ) ), "references": datasets.Sequence(datasets.Value("int32" ) ), } if self.config_name == "multilabel" else { "predictions": datasets.Value("int32" ), "references": datasets.Value("int32" ), } ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , ) def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Tuple="binary" , __lowerCamelCase : Tuple=None , __lowerCamelCase : Tuple="warn" , ) -> Optional[Any]: A : str = recall_score( __lowerCamelCase , __lowerCamelCase , labels=__lowerCamelCase , pos_label=__lowerCamelCase , average=__lowerCamelCase , sample_weight=__lowerCamelCase , zero_division=__lowerCamelCase , ) return {"recall": float(__lowerCamelCase ) if score.size == 1 else score}
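# Hedged cross-check of the Recall = TP / (TP + FN) equation in the docstring
# against scikit-learn, reusing the data from Example 1 above.
import math
from sklearn.metrics import recall_score

refs = [0, 0, 1, 1, 1]
preds = [0, 1, 0, 1, 1]
tp = sum(1 for r, p in zip(refs, preds) if r == 1 and p == 1)  # 2
fn = sum(1 for r, p in zip(refs, preds) if r == 1 and p == 0)  # 1
assert math.isclose(tp / (tp + fn), recall_score(refs, preds))  # 0.666...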
17
0
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Sequence, Value from .base import TaskTemplate @dataclass(frozen=_A ) class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = field(default="question-answering-extractive" ,metadata={"include_in_asdict_even_if_is_default": True} ) a__ = Features({"question": Value("string" ), "context": Value("string" )} ) a__ = Features( { "answers": Sequence( { "text": Value("string" ), "answer_start": Value("int32" ), } ) } ) a__ = "question" a__ = "context" a__ = "answers" @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict[str, str]: return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
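# Hedged usage sketch (assumes the `datasets` library, where the template
# above ships as QuestionAnsweringExtractive): the column_mapping property
# tells consumers how to rename dataset columns to the canonical names.
from datasets.tasks import QuestionAnsweringExtractive

task = QuestionAnsweringExtractive(question_column="query", context_column="passage")
assert task.column_mapping == {"query": "question", "passage": "context", "answers": "answers"}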
710
from collections import deque from .hash_table import HashTable class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : Union[str, Any] , *__lowerCamelCase : Dict , **__lowerCamelCase : int ) -> Optional[int]: super().__init__(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Union[str, Any]: A : Optional[Any] = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(__lowerCamelCase ) A : Dict = self.values[key] def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: return ( sum(self.charge_factor - len(__lowerCamelCase ) for slot in self.values ) / self.size_table * self.charge_factor ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : List[Any]=None ) -> Optional[int]: if not ( len(self.values[key] ) == self.charge_factor and self.values.count(__lowerCamelCase ) == 0 ): return key return super()._collision_resolution(__lowerCamelCase , __lowerCamelCase )
17
0
from __future__ import annotations from bisect import bisect_left from functools import total_ordering from heapq import merge @total_ordering class lowerCamelCase_ ( _A ): '''simple docstring''' def __lt__( self : Dict , __lowerCamelCase : Optional[Any] ) -> int: return self[-1] < other[-1] def __eq__( self : Dict , __lowerCamelCase : Optional[int] ) -> Optional[Any]: return self[-1] == other[-1] def UpperCAmelCase ( _lowerCamelCase ): A : list[Stack] = [] # sort into stacks for element in collection: A : List[Any] = Stack([element] ) A : Tuple = bisect_left(_lowerCamelCase , _lowerCamelCase ) if i != len(_lowerCamelCase ): stacks[i].append(_lowerCamelCase ) else: stacks.append(_lowerCamelCase ) # use a heap-based merge to merge stack efficiently A : Any = merge(*(reversed(_lowerCamelCase ) for stack in stacks) ) return collection if __name__ == "__main__": __SCREAMING_SNAKE_CASE = input("""Enter numbers separated by a comma:\n""").strip() __SCREAMING_SNAKE_CASE = [int(item) for item in user_input.split(""",""")] print(patience_sort(unsorted))
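# Hedged doctest-style check for patience sort, using the name referenced in
# the __main__ block: elements are dealt onto piles kept ordered by their
# tops via bisect_left, then the piles are k-way merged with heapq.merge.
assert list(patience_sort([1, 9, 5, 21, 17, 6])) == [1, 5, 6, 9, 17, 21]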
711
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class lowerCamelCase_ : '''simple docstring''' @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str: return self.get_dummy_input() @property def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : int=False , __lowerCamelCase : int=False , __lowerCamelCase : Optional[int]=False , ) -> Dict: A : Optional[Any] = 4 A : List[str] = 32 A : Any = (32, 32) A : str = torch.manual_seed(0 ) A : int = torch.device(__lowerCamelCase ) A : List[str] = (batch_size, num_channels) + sizes A : Dict = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase ) A : int = {"hidden_states": hidden_states} if include_temb: A : Any = 1_28 A : List[str] = randn_tensor((batch_size, temb_channels) , generator=__lowerCamelCase , device=__lowerCamelCase ) if include_res_hidden_states_tuple: A : str = torch.manual_seed(1 ) A : Tuple = (randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase ),) if include_encoder_hidden_states: A : Dict = floats_tensor((batch_size, 32, 32) ).to(__lowerCamelCase ) if include_skip_sample: A : Optional[int] = randn_tensor(((batch_size, 3) + sizes) , generator=__lowerCamelCase , device=__lowerCamelCase ) return dummy_input def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]: A : Dict = { "in_channels": 32, "out_channels": 32, "temb_channels": 1_28, } if self.block_type == "up": A : Dict = 32 if self.block_type == "mid": init_dict.pop("out_channels" ) A : str = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Optional[int] ) -> Union[str, Any]: A , A : str = self.prepare_init_args_and_inputs_for_common() A : List[Any] = self.block_class(**__lowerCamelCase ) unet_block.to(__lowerCamelCase ) unet_block.eval() with torch.no_grad(): A : int = unet_block(**__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): A : Union[str, Any] = output[0] self.assertEqual(output.shape , self.output_shape ) A : Any = output[0, -1, -3:, -3:] A : Union[str, Any] = torch.tensor(__lowerCamelCase ).to(__lowerCamelCase ) assert torch_all_close(output_slice.flatten() , __lowerCamelCase , atol=5e-3 ) @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: A , A : Tuple = self.prepare_init_args_and_inputs_for_common() A : str = self.block_class(**__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : Optional[int] = model(**__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): A : Optional[Any] = output[0] A : List[str] = torch.device(__lowerCamelCase ) A : List[str] = randn_tensor(output.shape , device=__lowerCamelCase ) A : Dict = torch.nn.functional.mse_loss(__lowerCamelCase , __lowerCamelCase ) loss.backward()
17
0
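For quick sanity-checking, here is a small usage sketch for the patience sort above; it assumes it runs in the same module as that snippet, and the sample data is arbitrary.

from random import sample

data = sample(range(100), 10)
assert patience_sort(list(data)) == sorted(data)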
import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = """<""" if sys.byteorder == """little""" else """>""" # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image __SCREAMING_SNAKE_CASE = [ np.dtype("""|b1"""), np.dtype("""|u1"""), np.dtype("""<u2"""), np.dtype(""">u2"""), np.dtype("""<i2"""), np.dtype(""">i2"""), np.dtype("""<u4"""), np.dtype(""">u4"""), np.dtype("""<i4"""), np.dtype(""">i4"""), np.dtype("""<f4"""), np.dtype(""">f4"""), np.dtype("""<f8"""), np.dtype(""">f8"""), ] @dataclass class lowerCamelCase_ : '''simple docstring''' a__ = True a__ = None # Automatically constructed a__ = "PIL.Image.Image" a__ = pa.struct({"bytes": pa.binary(), "path": pa.string()} ) a__ = field(default="Image" ,init=_A ,repr=_A ) def __call__( self : str ) -> Union[str, Any]: return self.pa_type def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ) -> dict: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'." ) if isinstance(__lowerCamelCase , __lowerCamelCase ): A : Tuple = np.array(__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): return {"path": value, "bytes": None} elif isinstance(__lowerCamelCase , __lowerCamelCase ): return {"path": None, "bytes": value} elif isinstance(__lowerCamelCase , np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(__lowerCamelCase ) elif isinstance(__lowerCamelCase , PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(__lowerCamelCase ) elif value.get("path" ) is not None and os.path.isfile(value["path"] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get("path" )} elif value.get("bytes" ) is not None or value.get("path" ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get("bytes" ), "path": value.get("path" )} else: raise ValueError( F"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" ) def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : dict , __lowerCamelCase : int=None ) -> "PIL.Image.Image": if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support decoding images, please install 'Pillow'." 
) if token_per_repo_id is None: A : List[Any] = {} A : Any = value["path"], value["bytes"] if bytes_ is None: if path is None: raise ValueError(F"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" ) else: if is_local_path(__lowerCamelCase ): A : List[Any] = PIL.Image.open(__lowerCamelCase ) else: A : Any = path.split("::" )[-1] try: A : int = string_to_dict(__lowerCamelCase , config.HUB_DATASETS_URL )["repo_id"] A : Tuple = token_per_repo_id.get(__lowerCamelCase ) except ValueError: A : Any = None with xopen(__lowerCamelCase , "rb" , use_auth_token=__lowerCamelCase ) as f: A : Optional[Any] = BytesIO(f.read() ) A : List[Any] = PIL.Image.open(bytes_ ) else: A : Union[str, Any] = PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def SCREAMING_SNAKE_CASE__ ( self : str ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value return ( self if self.decode else { "bytes": Value("binary" ), "path": Value("string" ), } ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : Union[pa.StringArray, pa.StructArray, pa.ListArray] ) -> pa.StructArray: if pa.types.is_string(storage.type ): A : Dict = pa.array([None] * len(__lowerCamelCase ) , type=pa.binary() ) A : List[Any] = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): A : Optional[int] = pa.array([None] * len(__lowerCamelCase ) , type=pa.string() ) A : int = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("bytes" ) >= 0: A : Optional[int] = storage.field("bytes" ) else: A : Optional[Any] = pa.array([None] * len(__lowerCamelCase ) , type=pa.binary() ) if storage.type.get_field_index("path" ) >= 0: A : Optional[Any] = storage.field("path" ) else: A : Dict = pa.array([None] * len(__lowerCamelCase ) , type=pa.string() ) A : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_list(storage.type ): A : int = pa.array( [encode_np_array(np.array(__lowerCamelCase ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , ) A : Dict = pa.array([None] * len(__lowerCamelCase ) , type=pa.string() ) A : List[str] = pa.StructArray.from_arrays( [bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() ) return array_cast(__lowerCamelCase , self.pa_type ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : pa.StructArray ) -> pa.StructArray: @no_op_if_value_is_null def path_to_bytes(__lowerCamelCase : Optional[int] ): with xopen(__lowerCamelCase , "rb" ) as f: A : Union[str, Any] = f.read() return bytes_ A : int = pa.array( [ (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) A : List[str] = pa.array( [os.path.basename(__lowerCamelCase ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , ) A : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() ) return array_cast(__lowerCamelCase , self.pa_type ) def UpperCAmelCase ( ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'." 
) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() A : Any = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def UpperCAmelCase ( _lowerCamelCase ): A : int = BytesIO() if image.format in list_image_compression_formats(): A : str = image.format else: A : List[Any] = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF" image.save(_lowerCamelCase , format=_lowerCamelCase ) return buffer.getvalue() def UpperCAmelCase ( _lowerCamelCase ): if hasattr(_lowerCamelCase , "filename" ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(_lowerCamelCase )} def UpperCAmelCase ( _lowerCamelCase ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'." ) A : Tuple = array.dtype A : Union[str, Any] = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER A : Union[str, Any] = dtype.kind A : List[Any] = dtype.itemsize A : int = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: A : Dict = np.dtype("|u1" ) if dtype_kind not in ["u", "i"]: raise TypeError( f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" ) if dtype is not dest_dtype: warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: A : Union[str, Any] = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: A : int = dtype_byteorder + dtype_kind + str(_lowerCamelCase ) A : int = np.dtype(_lowerCamelCase ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" ) A : str = PIL.Image.fromarray(array.astype(_lowerCamelCase ) ) return {"path": None, "bytes": image_to_bytes(_lowerCamelCase )} def UpperCAmelCase ( _lowerCamelCase ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'." ) if objs: A : Dict = first_non_null_value(_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(_lowerCamelCase , np.ndarray ): A : int = no_op_if_value_is_null(_lowerCamelCase ) return [obj_to_image_dict_func(_lowerCamelCase ) for obj in objs] elif isinstance(_lowerCamelCase , PIL.Image.Image ): A : Optional[Any] = no_op_if_value_is_null(_lowerCamelCase ) return [obj_to_image_dict_func(_lowerCamelCase ) for obj in objs] else: return objs else: return objs
712
import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs (`Dict[str, Any]`, *optional*): Additional stopping criteria specific kwargs. Return: `bool`. `False` indicates we should continue, `True` indicates we should stop. """ class lowerCamelCase_ ( _A ): '''simple docstring''' @add_start_docstrings(__lowerCamelCase ) def __call__( self : Optional[int] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Optional[Any] ) -> bool: raise NotImplementedError("StoppingCriteria needs to be subclassed" ) class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] = None ) -> List[Any]: A : str = max_length A : Optional[int] = max_position_embeddings @add_start_docstrings(__lowerCamelCase ) def __call__( self : List[str] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Any ) -> bool: A : List[Any] = input_ids.shape[-1] A : Any = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( "This is a friendly reminder - the current text generation call will exceed the model's predefined " F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """ "exceptions, performance degradation, or nothing at all." ) return is_done class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : int ) -> List[Any]: warnings.warn( "The class `MaxNewTokensCriteria` is deprecated. " F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """ "with `max_length = start_length + max_new_tokens` instead." 
, __lowerCamelCase , ) A : str = start_length A : Optional[Any] = max_new_tokens A : Dict = start_length + max_new_tokens @add_start_docstrings(__lowerCamelCase ) def __call__( self : int , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool: return input_ids.shape[-1] >= self.max_length class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : Optional[int] , __lowerCamelCase : float , __lowerCamelCase : Optional[float] = None ) -> List[Any]: A : str = max_time A : Dict = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(__lowerCamelCase ) def __call__( self : Any , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool: return time.time() - self.initial_timestamp > self.max_time class lowerCamelCase_ ( _A ): '''simple docstring''' @add_start_docstrings(__lowerCamelCase ) def __call__( self : Union[str, Any] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : int ) -> bool: return any(criteria(__lowerCamelCase , __lowerCamelCase ) for criteria in self ) @property def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]: for stopping_criterium in self: if isinstance(__lowerCamelCase , __lowerCamelCase ): return stopping_criterium.max_length elif isinstance(__lowerCamelCase , __lowerCamelCase ): return stopping_criterium.max_length return None def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : Optional[int] = stopping_criteria.max_length A : Any = deepcopy(_lowerCamelCase ) if stopping_max_length is not None and stopping_max_length != max_length: warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , _lowerCamelCase ) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=_lowerCamelCase ) ) return new_stopping_criteria
17
0
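The MaxLengthCriteria logic above reduces to a single shape comparison; this standalone sketch (not the transformers class itself) shows the rule on a toy tensor.

import torch

def max_length_done(input_ids: torch.LongTensor, max_length: int) -> bool:
    # mirrors MaxLengthCriteria: stop once the sequence reaches max_length
    return input_ids.shape[-1] >= max_length

ids = torch.ones(1, 12, dtype=torch.long)
assert max_length_done(ids, 12) and not max_length_done(ids, 13)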
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{overview_doc} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
713
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
):
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, variable))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    # Find fourth Root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 + 5j)}")
    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )
    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
    )
    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
17
0
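The update rule in the Newton-Raphson snippet above can be checked by hand; this self-contained sketch runs a few iterations for f(x) = x**2 - 2 with multiplicity 1.

f = lambda x: x**2 - 2   # function whose root we want
df = lambda x: 2 * x     # its derivative

guess = 1.0
for _ in range(6):
    guess = guess - f(guess) / df(guess)  # same update rule, multiplicity = 1
print(guess)  # approaches sqrt(2) ~= 1.4142135623730951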
from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = ["pixel_values"] def __init__( self : str , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Dict[str, int]] = None , __lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCamelCase : bool = True , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : bool = True , __lowerCamelCase : Union[int, float] = 1 / 2_55 , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , **__lowerCamelCase : Any , ) -> None: super().__init__(**__lowerCamelCase ) A : Any = size if size is not None else {"shortest_edge": 2_56} A : Optional[Any] = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase ) A : Tuple = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24} A : List[str] = get_size_dict(__lowerCamelCase , param_name="crop_size" ) A : Optional[int] = do_resize A : Optional[int] = size A : List[Any] = resample A : Optional[int] = do_center_crop A : List[Any] = crop_size A : Dict = do_rescale A : Any = rescale_factor A : Dict = do_normalize A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : np.ndarray , __lowerCamelCase : Dict[str, int] , __lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Tuple , ) -> np.ndarray: A : int = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) A : Union[str, Any] = get_resize_output_image_size(__lowerCamelCase , size=size["shortest_edge"] , default_to_square=__lowerCamelCase ) return resize(__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : np.ndarray , __lowerCamelCase : Dict[str, int] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Optional[Any] , ) -> np.ndarray: A : str = get_size_dict(__lowerCamelCase ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}""" ) return center_crop(__lowerCamelCase , size=(size["height"], size["width"]) , data_format=__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : np.ndarray , __lowerCamelCase : float , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : List[str] ) -> np.ndarray: return rescale(__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[float, List[float]] , __lowerCamelCase : Union[float, List[float]] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Union[str, Any] , ) -> np.ndarray: return normalize(__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : ImageInput , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : PILImageResampling = None , __lowerCamelCase : bool = None , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[float] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__lowerCamelCase : List[str] , ) -> str: A : int = do_resize if do_resize is not None else self.do_resize A : str = size if size is not None else self.size A : List[str] = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase ) A : Any = resample if resample is not None else self.resample A : int = do_center_crop if do_center_crop is not None else self.do_center_crop A : int = crop_size if crop_size is not None else self.crop_size A : Any = get_size_dict(__lowerCamelCase , param_name="crop_size" ) A : Dict = do_rescale if do_rescale is not None else self.do_rescale A : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor A : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize A : Optional[Any] = image_mean if image_mean is not None else self.image_mean A : Dict = image_std if image_std is not None else self.image_std A : Optional[int] = make_list_of_images(__lowerCamelCase ) if not valid_images(__lowerCamelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. 
A : Union[str, Any] = [to_numpy_array(__lowerCamelCase ) for image in images] if do_resize: A : List[str] = [self.resize(image=__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase ) for image in images] if do_center_crop: A : List[str] = [self.center_crop(image=__lowerCamelCase , size=__lowerCamelCase ) for image in images] if do_rescale: A : Tuple = [self.rescale(image=__lowerCamelCase , scale=__lowerCamelCase ) for image in images] if do_normalize: A : str = [self.normalize(image=__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase ) for image in images] A : Optional[Any] = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) for image in images] A : int = {"pixel_values": images} return BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : List[Tuple] = None ) -> Union[str, Any]: A : Union[str, Any] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(__lowerCamelCase ) != len(__lowerCamelCase ): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) if is_torch_tensor(__lowerCamelCase ): A : List[Any] = target_sizes.numpy() A : Optional[Any] = [] for idx in range(len(__lowerCamelCase ) ): A : Optional[int] = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=__lowerCamelCase ) A : Optional[int] = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(__lowerCamelCase ) else: A : Tuple = logits.argmax(dim=1 ) A : Union[str, Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
714
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} __SCREAMING_SNAKE_CASE = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } __SCREAMING_SNAKE_CASE = { """allenai/led-base-16384""": 16384, } class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = LEDTokenizer a__ = ["input_ids", "attention_mask"] def __init__( self : int , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : str="replace" , __lowerCamelCase : Optional[int]="<s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Dict="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : Union[str, Any]="<pad>" , __lowerCamelCase : Dict="<mask>" , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : str=True , **__lowerCamelCase : Union[str, Any] , ) -> Optional[int]: super().__init__( __lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase , **__lowerCamelCase , ) A : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space: A : Any = getattr(__lowerCamelCase , pre_tok_state.pop("type" ) ) A : Any = add_prefix_space A : Tuple = pre_tok_class(**__lowerCamelCase ) A : str = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` A : List[str] = "post_processor" A : Union[str, Any] = getattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) if tokenizer_component_instance: A : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A : Union[str, Any] = tuple(state["sep"] ) if "cls" in state: A : str = tuple(state["cls"] ) A : int = False if state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space: A : List[Any] = add_prefix_space A : Dict = True if state.get("trim_offsets" , __lowerCamelCase ) != trim_offsets: A : Dict = trim_offsets A : str = True if changes_to_apply: A : int = getattr(__lowerCamelCase , state.pop("type" ) ) A : Dict = component_class(**__lowerCamelCase ) setattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) @property # Copied from 
transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Any ) -> Dict: A : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else value A : Tuple = value def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[str] ) -> BatchEncoding: A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , *__lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ) -> BatchEncoding: A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]: A : Optional[Any] = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase ) return tuple(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any]=None ) -> List[str]: A : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: A : str = [self.sep_token_id] A : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ) -> dict: A : Dict = super()._pad( encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: A : List[Any] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: A : Optional[int] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
A : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase ) if needs_to_be_padded: A : Any = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` A : Tuple = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": A : Tuple = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
17
0
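The rescale and normalize steps of the image processor above amount to simple array arithmetic; this numpy sketch mirrors them outside the transformers API, with illustrative mean/std values.

import numpy as np

image = np.random.randint(0, 256, (3, 224, 224)).astype(np.float32)
image = image * (1 / 255)  # rescale, matching the rescale_factor=1/255 default
mean = np.array([0.5, 0.5, 0.5]).reshape(3, 1, 1)  # illustrative values
std = np.array([0.5, 0.5, 0.5]).reshape(3, 1, 1)   # illustrative values
image = (image - mean) / std  # normalize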
import requests

APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "onecall", params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input("Enter a location:").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
715
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
17
0
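A hedged illustration of the request current_weather above builds; "YOUR_APPID" is a placeholder, a real key and network access are needed, and the response layout follows OpenWeatherMap's documentation.

import requests

params = {"q": "Chicago", "appid": "YOUR_APPID"}  # "YOUR_APPID" is a placeholder
response = requests.get("https://api.openweathermap.org/data/2.5/weather", params=params)
print(response.status_code)  # 401 until a real appid is supplied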
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
716
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class lowerCamelCase_ : '''simple docstring''' def __init__( self : int , __lowerCamelCase : Any , __lowerCamelCase : Dict=3 , __lowerCamelCase : Dict=32 , __lowerCamelCase : Any=3 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : str=[8, 16, 32, 64] , __lowerCamelCase : Dict=[1, 1, 2, 1] , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : str=3 , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase : Tuple=[2, 3, 4] , __lowerCamelCase : Any=1 , ) -> int: A : Optional[int] = parent A : List[str] = batch_size A : Tuple = image_size A : List[str] = num_channels A : List[str] = embeddings_size A : List[str] = hidden_sizes A : str = depths A : Optional[Any] = is_training A : int = use_labels A : Optional[int] = hidden_act A : List[Any] = num_labels A : List[str] = scope A : str = len(__lowerCamelCase ) A : Optional[int] = out_features A : str = out_indices A : Optional[int] = num_groups def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: A : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Optional[int] = None if self.use_labels: A : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels ) A : Tuple = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ) -> Optional[int]: A : Any = BitModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : List[Any] = model(__lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : Dict ) -> Tuple: A : Union[str, Any] = self.num_labels A : List[str] = BitForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : str = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ) -> 
List[Any]: A : Dict = BitBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Optional[Any] = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None A : Optional[Any] = None A : Optional[int] = BitBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Any = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Dict: A : List[str] = self.prepare_config_and_inputs() A , A , A : Tuple = config_and_inputs A : Tuple = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () a__ = ( {"feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: A : Any = BitModelTester(self ) A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: return @unittest.skip(reason="Bit does not output attentions" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: pass @unittest.skip(reason="Bit does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]: pass @unittest.skip(reason="Bit does not support input and output embeddings" ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]: pass def SCREAMING_SNAKE_CASE__ ( self : int ) -> str: A , A : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Dict = model_class(__lowerCamelCase ) A : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Optional[Any] = [*signature.parameters.keys()] A : List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]: A : int = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]: A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Optional[int] = model_class(config=__lowerCamelCase ) for name, module in model.named_modules(): if isinstance(__lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: def check_hidden_states_output(__lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] ): A : Dict = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): A : List[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) A : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A : List[Any] = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() A : Dict = ["preactivation", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: A : Dict = layer_type A : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) @unittest.skip(reason="Bit does not use feedforward chunking" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: pass def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]: A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]: for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Optional[Any] = BitModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]: return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: A : Union[str, Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCamelCase ) A : List[Any] = self.default_image_processor A : List[Any] = prepare_img() A : Tuple = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): A : Union[str, Any] = model(**__lowerCamelCase ) # verify the logits A : str = torch.Size((1, 
10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : Optional[Any] = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) @require_torch class lowerCamelCase_ ( _A ,unittest.TestCase ): '''simple docstring''' a__ = (BitBackbone,) if is_torch_available() else () a__ = BitConfig a__ = False def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: A : Union[str, Any] = BitModelTester(self )
17
0
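Assuming a transformers release that ships the Vivit model, the configuration above can be instantiated directly with its defaults; no weights are downloaded.

from transformers import VivitConfig

config = VivitConfig()  # defaults from the record above
print(config.hidden_size, config.num_frames)  # 768 32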
'''simple docstring''' import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class lowerCamelCase_ : '''simple docstring''' def __init__( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Tuple=13 , __lowerCamelCase : List[str]=30 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : Any=3 , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : int=32 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Any=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=2 , ) -> str: A : List[Any] = parent A : Optional[int] = batch_size A : Any = image_size A : Optional[Any] = patch_size A : Optional[Any] = num_channels A : Tuple = is_training A : Optional[Any] = use_labels A : Union[str, Any] = hidden_size A : Tuple = num_hidden_layers A : Union[str, Any] = num_attention_heads A : Union[str, Any] = intermediate_size A : Any = hidden_act A : Tuple = hidden_dropout_prob A : Dict = attention_probs_dropout_prob A : Any = type_sequence_label_size A : Tuple = initializer_range A : List[Any] = scope A : Optional[int] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) A : List[str] = (image_size // patch_size) ** 2 A : List[str] = num_patches + 2 def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int: A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : List[Any] = None if self.use_labels: A : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Dict = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str] ) -> 
int: A : Optional[int] = DeiTModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : List[Any] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Any ) -> Any: A : List[Any] = DeiTForMaskedImageModeling(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A : List[str] = 1 A : Optional[int] = DeiTForMaskedImageModeling(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : int ) -> Dict: A : str = self.type_sequence_label_size A : List[str] = DeiTForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A : Any = 1 A : str = DeiTForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: A : Dict = self.prepare_config_and_inputs() ( A ) : Tuple = config_and_inputs A : int = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) a__ = ( { "feature-extraction": DeiTModel, "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]: A : str = DeiTModelTester(self ) A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict: self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int: pass def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: A : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Dict = model_class(__lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]: A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Union[str, Any] = 
model_class(__lowerCamelCase ) A : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Any = [*signature.parameters.keys()] A : Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any: A : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any]=False ) -> str: A : Union[str, Any] = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: if not self.model_tester.is_training: return A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() A : Dict = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(__lowerCamelCase ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue A : Union[str, Any] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Dict = model(**__lowerCamelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return A : Tuple = False A : Any = True for model_class in self.all_model_classes: if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue A : List[str] = model_class(__lowerCamelCase ) model.gradient_checkpointing_enable() model.to(__lowerCamelCase ) model.train() A : Dict = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Tuple = model(**__lowerCamelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() A : int = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(__lowerCamelCase ), *get_values(__lowerCamelCase ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ): A : Tuple = problem_type["title"] A : Optional[Any] = 
problem_type["num_labels"] A : List[str] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if problem_type["num_labels"] > 1: A : List[str] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) A : int = inputs["labels"].to(problem_type["dtype"] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=__lowerCamelCase ) as warning_list: A : Optional[Any] = model(**__lowerCamelCase ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Optional[Any] = DeiTModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: A : Optional[int] = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to( __lowerCamelCase ) A : List[Any] = self.default_image_processor A : List[Any] = prepare_img() A : Optional[Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): A : List[str] = model(**__lowerCamelCase ) # verify the logits A : str = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : List[str] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: A : str = DeiTModel.from_pretrained( "facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" ) A : Dict = self.default_image_processor A : Optional[int] = prepare_img() A : Union[str, Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ) A : Union[str, Any] = inputs.pixel_values.to(__lowerCamelCase ) # forward pass to make sure inference works in fp16 with torch.no_grad(): A : List[str] = model(__lowerCamelCase )
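For context, a minimal stand-alone repro (plain PyTorch, no DeiT classes; the tensor shapes are illustrative assumptions) of the broadcasting warning that the regression problem-type test above intercepts:

import warnings

import torch
from torch import nn

logits = torch.randn(4, 1)  # model output shaped (batch_size, num_labels=1)
labels = torch.randn(4)     # target accidentally shaped (batch_size,)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    loss = nn.MSELoss()(logits, labels)  # silently broadcasts (4, 1) vs (4,) to (4, 4)

assert any("Using a target size" in str(w.message) for w in caught)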
717
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: A : Union[str, Any] = XLMRobertaModel.from_pretrained("xlm-roberta-base" ) A : Tuple = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] ) # The dog is cute and lives in the garden house A : Tuple = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim A : List[str] = torch.tensor( [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): A : Tuple = model(__lowerCamelCase )["last_hidden_state"].detach() self.assertEqual(output.shape , __lowerCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCamelCase , atol=1e-3 ) ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int: A : str = XLMRobertaModel.from_pretrained("xlm-roberta-large" ) A : List[Any] = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] ) # The dog is cute and lives in the garden house A : Optional[Any] = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim A : List[Any] = torch.tensor( [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): A : Optional[int] = model(__lowerCamelCase )["last_hidden_state"].detach() self.assertEqual(output.shape , __lowerCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCamelCase , atol=1e-3 ) )
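As a sketch, the hard-coded input_ids in the tests above can be reproduced with the matching tokenizer rather than copied by hand (assumes network access to the Hub for the download):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
encoded = tokenizer("The dog is cute and lives in the garden house", return_tensors="pt")
# encoded.input_ids should match the tensor hard-coded in the tests above
print(encoded.input_ids)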
17
0
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """
    Checks whether some subset of ``arr`` sums exactly to ``required_sum``.

    >>> is_sum_subset([3, 34, 4, 12, 5, 2], 9)
    True
    >>> is_sum_subset([3, 34, 4, 12, 5, 2], 30)
    False
    """
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # a sum of zero can always be formed by taking no elements at all
    for i in range(arr_len + 1):
        subset[i][0] = True

    # a non-zero sum can never be formed from an empty prefix
    for j in range(1, required_sum + 1):
        subset[0][j] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                # the i-th element is too large for this target sum: skip it
                subset[i][j] = subset[i - 1][j]
            else:
                # either skip the i-th element or take it
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
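Quick usage example for the function above (values chosen for illustration: 9 = 4 + 5 is reachable, while no subset of the list sums to 30):

assert is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True
assert is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False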
718
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase_ : '''simple docstring''' def __init__( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Optional[int]=10 , __lowerCamelCase : Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase : Tuple=[1, 1, 2, 1] , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , ) -> str: A : Optional[Any] = parent A : Optional[int] = batch_size A : List[str] = image_size A : List[str] = num_channels A : Tuple = embeddings_size A : Optional[int] = hidden_sizes A : Dict = depths A : Optional[int] = is_training A : List[str] = use_labels A : List[Any] = hidden_act A : Optional[int] = num_labels A : int = scope A : List[Any] = len(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[Any]: A : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Optional[Any] = None if self.use_labels: A : Any = ids_tensor([self.batch_size] , self.num_labels ) A : List[Any] = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Tuple: A : List[str] = TFRegNetModel(config=__lowerCamelCase ) A : str = model(__lowerCamelCase , training=__lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Tuple ) -> List[str]: A : List[Any] = self.num_labels A : int = TFRegNetForImageClassification(__lowerCamelCase ) A : str = model(__lowerCamelCase , labels=__lowerCamelCase , training=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: A : Any = self.prepare_config_and_inputs() A , A , A : str = config_and_inputs A : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () a__ = ( {"feature-extraction": TFRegNetModel, 
"image-classification": TFRegNetForImageClassification} if is_tf_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: A : Optional[Any] = TFRegNetModelTester(self ) A : int = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Tuple: return @unittest.skip(reason="RegNet does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) @slow def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: super().test_keras_fit() @unittest.skip(reason="RegNet does not support input and output embeddings" ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Union[str, Any]: pass def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple: A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Union[str, Any] = model_class(__lowerCamelCase ) A : int = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Union[str, Any] = [*signature.parameters.keys()] A : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple: A : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: def check_hidden_states_output(__lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ): A : int = model_class(__lowerCamelCase ) A : int = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) , training=__lowerCamelCase ) A : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A : Dict = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) A , A : int = self.model_tester.prepare_config_and_inputs_for_common() A : str = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: A : List[str] = layer_type A : List[Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: A , A : Tuple = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(__lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]={} ): A : Optional[int] = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ) A : int = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ).to_tuple() def recursive_check(__lowerCamelCase : List[str] , __lowerCamelCase : Any ): if isinstance(__lowerCamelCase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in 
zip(__lowerCamelCase , __lowerCamelCase ): recursive_check(__lowerCamelCase , __lowerCamelCase ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(__lowerCamelCase , __lowerCamelCase ) ) , msg=( "Tuple and dict output are not equal. Difference:" F""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ) , ) recursive_check(__lowerCamelCase , __lowerCamelCase ) for model_class in self.all_model_classes: A : Tuple = model_class(__lowerCamelCase ) A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} ) A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: A : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Union[str, Any] = TFRegNetModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]: A : List[Any] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) A : Optional[int] = self.default_image_processor A : List[Any] = prepare_img() A : str = image_processor(images=__lowerCamelCase , return_tensors="tf" ) # forward pass A : List[Any] = model(**__lowerCamelCase , training=__lowerCamelCase ) # verify the logits A : Dict = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : Optional[Any] = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 )
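A stand-alone sketch of the recursive tuple/dict comparison used in check_equivalence above, reduced to plain TensorFlow tensors (the toy inputs are assumptions for illustration):

import tensorflow as tf


def assert_nested_close(tuple_obj, dict_obj):
    # walk nested tuples/lists in lockstep, skipping None entries
    if isinstance(tuple_obj, (list, tuple)):
        for t, d in zip(tuple_obj, dict_obj):
            assert_nested_close(t, d)
    elif tuple_obj is None:
        return
    else:
        tf.debugging.assert_near(tuple_obj, dict_obj, atol=1e-6)


assert_nested_close((tf.constant([1.0, 2.0]), None), (tf.constant([1.0, 2.0]), None))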
17
0
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset __SCREAMING_SNAKE_CASE = """bert-base-cased""" __SCREAMING_SNAKE_CASE = """google/pegasus-xsum""" __SCREAMING_SNAKE_CASE = [""" Sam ate lunch today.""", """Sams lunch ingredients."""] __SCREAMING_SNAKE_CASE = ["""A very interesting story about what I ate for lunch.""", """Avocado, celery, turkey, coffee"""] __SCREAMING_SNAKE_CASE = """patrickvonplaten/t5-tiny-random""" __SCREAMING_SNAKE_CASE = """sshleifer/bart-tiny-random""" __SCREAMING_SNAKE_CASE = """sshleifer/tiny-mbart""" __SCREAMING_SNAKE_CASE = """sshleifer/tiny-marian-en-de""" def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : str = "\n".join(_lowerCamelCase ) Path(_lowerCamelCase ).open("w" ).writelines(_lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase ): for split in ["train", "val", "test"]: _dump_articles(os.path.join(_lowerCamelCase , f"""{split}.source""" ) , _lowerCamelCase ) _dump_articles(os.path.join(_lowerCamelCase , f"""{split}.target""" ) , _lowerCamelCase ) return tmp_dir class lowerCamelCase_ ( _A ): '''simple docstring''' @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Optional[int] ) -> Tuple: A : List[str] = AutoTokenizer.from_pretrained(__lowerCamelCase ) A : Tuple = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) A : Optional[Any] = max(len(tokenizer.encode(__lowerCamelCase ) ) for a in ARTICLES ) A : Optional[Any] = max(len(tokenizer.encode(__lowerCamelCase ) ) for a in SUMMARIES ) A : Tuple = 4 A : int = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated A : Optional[Any] = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error. A : Any = SeqaSeqDataset( __lowerCamelCase , data_dir=__lowerCamelCase , type_path="train" , max_source_length=__lowerCamelCase , max_target_length=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , ) A : List[Any] = DataLoader(__lowerCamelCase , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(__lowerCamelCase , __lowerCamelCase ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place A : str = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : List[str] ) -> List[str]: A : Union[str, Any] = AutoTokenizer.from_pretrained(__lowerCamelCase ) A : Any = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) A : List[Any] = max(len(tokenizer.encode(__lowerCamelCase ) ) for a in ARTICLES ) A : int = max(len(tokenizer.encode(__lowerCamelCase ) ) for a in SUMMARIES ) A : List[str] = 4 A : str = LegacySeqaSeqDataset( __lowerCamelCase , data_dir=__lowerCamelCase , type_path="train" , max_source_length=20 , max_target_length=__lowerCamelCase , ) A : List[str] = DataLoader(__lowerCamelCase , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[str]: A : Tuple = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" ) A : Optional[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) A : Optional[int] = tmp_dir.joinpath("train.source" ).open().readlines() A : Union[str, Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(__lowerCamelCase , __lowerCamelCase , 1_28 , __lowerCamelCase ) A : List[str] = {x.name for x in tmp_dir.iterdir()} A : Tuple = {x.name for x in save_dir.iterdir()} A : List[str] = save_dir.joinpath("train.source" ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(__lowerCamelCase ) < len(__lowerCamelCase ) assert len(__lowerCamelCase ) == 1 assert len(packed_examples[0] ) == sum(len(__lowerCamelCase ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any: if not FAIRSEQ_AVAILABLE: return A : Any = self._get_dataset(max_len=64 ) A : List[Any] = 64 A : str = ds.make_dynamic_sampler(__lowerCamelCase , required_batch_size_multiple=__lowerCamelCase ) A : Optional[Any] = [len(__lowerCamelCase ) for x in batch_sampler] assert len(set(__lowerCamelCase ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(__lowerCamelCase ) == len(__lowerCamelCase ) # no dropped or added examples A : Any = DataLoader(__lowerCamelCase , batch_sampler=__lowerCamelCase , collate_fn=ds.collate_fn , num_workers=2 ) A : List[Any] = [] A : Union[str, Any] = [] for batch in data_loader: A : Union[str, Any] = 
batch["input_ids"].shape A : str = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple A : int = np.product(batch["input_ids"].shape ) num_src_per_batch.append(__lowerCamelCase ) if num_src_tokens > (max_tokens * 1.1): failures.append(__lowerCamelCase ) assert num_src_per_batch[0] == max(__lowerCamelCase ) if failures: raise AssertionError(F"""too many tokens in {len(__lowerCamelCase )} batches""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str: A : Dict = self._get_dataset(max_len=5_12 ) A : List[str] = 2 A : List[str] = ds.make_sortish_sampler(__lowerCamelCase , shuffle=__lowerCamelCase ) A : Any = DataLoader(__lowerCamelCase , batch_size=__lowerCamelCase , collate_fn=ds.collate_fn , num_workers=2 ) A : List[str] = DataLoader(__lowerCamelCase , batch_size=__lowerCamelCase , collate_fn=ds.collate_fn , num_workers=2 , sampler=__lowerCamelCase ) A : Optional[Any] = tokenizer.pad_token_id def count_pad_tokens(__lowerCamelCase : List[str] , __lowerCamelCase : Dict="input_ids" ): return [batch[k].eq(__lowerCamelCase ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(__lowerCamelCase , k="labels" ) ) < sum(count_pad_tokens(__lowerCamelCase , k="labels" ) ) assert sum(count_pad_tokens(__lowerCamelCase ) ) < sum(count_pad_tokens(__lowerCamelCase ) ) assert len(__lowerCamelCase ) == len(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Any=10_00 , __lowerCamelCase : List[Any]=1_28 ) -> Any: if os.getenv("USE_REAL_DATA" , __lowerCamelCase ): A : List[Any] = "examples/seq2seq/wmt_en_ro" A : Dict = max_len * 2 * 64 if not Path(__lowerCamelCase ).joinpath("train.len" ).exists(): save_len_file(__lowerCamelCase , __lowerCamelCase ) else: A : Tuple = "examples/seq2seq/test_data/wmt_en_ro" A : List[str] = max_len * 4 save_len_file(__lowerCamelCase , __lowerCamelCase ) A : Any = AutoTokenizer.from_pretrained(__lowerCamelCase ) A : Tuple = SeqaSeqDataset( __lowerCamelCase , data_dir=__lowerCamelCase , type_path="train" , max_source_length=__lowerCamelCase , max_target_length=__lowerCamelCase , n_obs=__lowerCamelCase , ) return ds, max_tokens, tokenizer def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any: A : Any = self._get_dataset() A : Dict = set(DistributedSortishSampler(__lowerCamelCase , 2_56 , num_replicas=2 , rank=0 , add_extra_examples=__lowerCamelCase ) ) A : Union[str, Any] = set(DistributedSortishSampler(__lowerCamelCase , 2_56 , num_replicas=2 , rank=1 , add_extra_examples=__lowerCamelCase ) ) assert idsa.intersection(__lowerCamelCase ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : List[str] ) -> Optional[Any]: A : List[str] = AutoTokenizer.from_pretrained(__lowerCamelCase , use_fast=__lowerCamelCase ) if tok_name == MBART_TINY: A : str = SeqaSeqDataset( __lowerCamelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , ) A : int = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: A : Dict = SeqaSeqDataset( __lowerCamelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , ) A : int = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(__lowerCamelCase ) == 1 
if tok_name == BART_TINY else len(__lowerCamelCase ) == 0
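For reference, a toy version of the pad-token counting used by the sortish-sampler test above (pad_token_id=0 is an assumption for illustration):

import torch

pad_token_id = 0
batch = {"input_ids": torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])}
num_pad = batch["input_ids"].eq(pad_token_id).sum().item()
assert num_pad == 5  # 2 pads in the first row + 3 in the second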
719
import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = (PNDMScheduler,) a__ = (("num_inference_steps", 50),) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , **__lowerCamelCase : str ) -> Optional[Any]: A : Union[str, Any] = { "num_train_timesteps": 10_00, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**__lowerCamelCase ) return config def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : List[str]=0 , **__lowerCamelCase : Any ) -> Tuple: A : Dict = dict(self.forward_default_kwargs ) A : Dict = kwargs.pop("num_inference_steps" , __lowerCamelCase ) A : Union[str, Any] = self.dummy_sample A : List[Any] = 0.1 * sample A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A : Any = self.get_scheduler_config(**__lowerCamelCase ) A : int = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals A : Tuple = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCamelCase ) A : Dict = scheduler_class.from_pretrained(__lowerCamelCase ) new_scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals A : Tuple = dummy_past_residuals[:] A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : str = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A : int = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[str] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]: pass def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Optional[Any]=0 , **__lowerCamelCase : Tuple ) -> str: A : List[str] = dict(self.forward_default_kwargs ) A : Optional[int] = kwargs.pop("num_inference_steps" , __lowerCamelCase ) A : List[str] = self.dummy_sample A : Any = 0.1 * sample A : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A : Tuple = self.get_scheduler_config() A : Optional[int] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals (must be after setting timesteps) A : Optional[int] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCamelCase ) A : str = scheduler_class.from_pretrained(__lowerCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residual (must be after setting timesteps) A : Optional[Any] = dummy_past_residuals[:] A : Union[str, Any] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : Dict = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A : 
Union[str, Any] = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[Any] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : Tuple , **__lowerCamelCase : Any ) -> Union[str, Any]: A : Optional[Any] = self.scheduler_classes[0] A : List[Any] = self.get_scheduler_config(**__lowerCamelCase ) A : str = scheduler_class(**__lowerCamelCase ) A : List[str] = 10 A : Union[str, Any] = self.dummy_model() A : int = self.dummy_sample_deter scheduler.set_timesteps(__lowerCamelCase ) for i, t in enumerate(scheduler.prk_timesteps ): A : Optional[int] = model(__lowerCamelCase , __lowerCamelCase ) A : Optional[int] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): A : Tuple = model(__lowerCamelCase , __lowerCamelCase ) A : Tuple = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample return sample def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any: A : Union[str, Any] = dict(self.forward_default_kwargs ) A : Union[str, Any] = kwargs.pop("num_inference_steps" , __lowerCamelCase ) for scheduler_class in self.scheduler_classes: A : Dict = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) A : List[Any] = self.dummy_sample A : List[Any] = 0.1 * sample if num_inference_steps is not None and hasattr(__lowerCamelCase , "set_timesteps" ): scheduler.set_timesteps(__lowerCamelCase ) elif num_inference_steps is not None and not hasattr(__lowerCamelCase , "set_timesteps" ): A : List[str] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] A : Tuple = dummy_past_residuals[:] A : Dict = scheduler.step_prk(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[Any] = scheduler.step_prk(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) A : Any = scheduler.step_plms(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : str = scheduler.step_plms(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple: for timesteps in [1_00, 10_00]: self.check_over_configs(num_train_timesteps=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]: for steps_offset in [0, 1]: self.check_over_configs(steps_offset=__lowerCamelCase ) A : Dict = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config(steps_offset=1 ) A : Optional[int] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=__lowerCamelCase , beta_end=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : 
Optional[int] ) -> Union[str, Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]: for t in [1, 5, 10]: self.check_over_forward(time_step=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int: for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ): self.check_over_forward(num_inference_steps=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any: # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 A : str = 27 for scheduler_class in self.scheduler_classes: A : Tuple = self.dummy_sample A : List[Any] = 0.1 * sample A : List[Any] = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : str ) -> int: with self.assertRaises(__lowerCamelCase ): A : Union[str, Any] = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict: A : Optional[Any] = self.full_loop() A : Tuple = torch.sum(torch.abs(__lowerCamelCase ) ) A : Optional[int] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 198.1318 ) < 1e-2 assert abs(result_mean.item() - 0.2580 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any: A : Any = self.full_loop(prediction_type="v_prediction" ) A : Union[str, Any] = torch.sum(torch.abs(__lowerCamelCase ) ) A : List[str] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 67.3986 ) < 1e-2 assert abs(result_mean.item() - 0.0878 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]: # We specify different beta, so that the first alpha is 0.99 A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 ) A : Dict = torch.sum(torch.abs(__lowerCamelCase ) ) A : Any = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 230.0399 ) < 1e-2 assert abs(result_mean.item() - 0.2995 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any: # We specify different beta, so that the first alpha is 0.99 A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 ) A : List[Any] = torch.sum(torch.abs(__lowerCamelCase ) ) A : Optional[Any] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 186.9482 ) < 1e-2 assert abs(result_mean.item() - 0.2434 ) < 1e-3
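A stand-alone sketch of the PRK/PLMS denoising loop that the full-loop helper above exercises; the constant residual stands in for a trained noise-prediction model and is purely an assumption:

import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.prk_timesteps:
    residual = 0.1 * sample  # stand-in for model(sample, t)
    sample = scheduler.step_prk(residual, t, sample).prev_sample
for t in scheduler.plms_timesteps:
    residual = 0.1 * sample
    sample = scheduler.step_plms(residual, t, sample).prev_sample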
17
0
import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def UpperCAmelCase ( ): raise RuntimeError("CUDA out of memory." ) class lowerCamelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : Tuple ) -> Tuple: super().__init__() A : Dict = nn.Linear(3 , 4 ) A : Dict = nn.BatchNormad(4 ) A : Optional[Any] = nn.Linear(4 , 5 ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Optional[Any] ) -> str: return self.lineara(self.batchnorm(self.lineara(__lowerCamelCase ) ) ) class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: A : Union[str, Any] = [] @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(__lowerCamelCase : Optional[Any] ): nonlocal batch_sizes batch_sizes.append(__lowerCamelCase ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(__lowerCamelCase , [1_28, 64, 32, 16, 8] ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]: A : Optional[int] = [] @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(__lowerCamelCase : Tuple , __lowerCamelCase : int ): nonlocal batch_sizes batch_sizes.append(__lowerCamelCase ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga A : str = mock_training_loop_function("hello" ) self.assertListEqual(__lowerCamelCase , [1_28, 64, 32, 16, 8] ) self.assertListEqual([bs, arga] , [8, "hello"] ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> str: @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(__lowerCamelCase : Union[str, Any] ): pass with self.assertRaises(__lowerCamelCase ) as cm: mock_training_loop_function() self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict: @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(__lowerCamelCase : Optional[Any] ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(__lowerCamelCase ) as cm: mock_training_loop_function() self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[Any]: @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(__lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(__lowerCamelCase ) as cm: mock_training_loop_function(1_28 , "hello" , "world" ) self.assertIn("Batch size was passed into `f`" , cm.exception.args[0] ) self.assertIn("`f(arg1='hello', arg2='world')" , cm.exception.args[0] ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Any: @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(__lowerCamelCase : Optional[int] ): raise ValueError("Oops, we had an error!" ) with self.assertRaises(__lowerCamelCase ) as cm: mock_training_loop_function() self.assertIn("Oops, we had an error!" 
, cm.exception.args[0] ) @require_cuda def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]: A : Optional[int] = torch.cuda.memory_allocated() A : Optional[int] = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , __lowerCamelCase ) A : Optional[Any] = release_memory(__lowerCamelCase ) self.assertEqual(torch.cuda.memory_allocated() , __lowerCamelCase )
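Typical use of the decorator exercised above, as a minimal sketch; the training body is a placeholder:

from accelerate.utils.memory import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    print(f"trying batch_size={batch_size}")
    # a real training step would go here and may raise a CUDA OOM error,
    # in which case the decorator retries with the batch size halved


train()  # tries 128, then 64, 32, ... until the body completes without OOM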
720
from __future__ import annotations

from math import pi

# Define the reduced Planck constant ℏ (h-bar) and the speed of light c
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """
    Solve the Casimir force equation F = (ℏ * c * π² * A) / (240 * d⁴) for the one
    argument passed as zero, returning it in a dict keyed by its name.
    """
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
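Worked example for the function above: two 1 m² plates one micrometre apart (the numeric result follows from the constants defined above):

result = casimir_force(force=0, area=1.0, distance=1e-6)
print(result)  # {'force': ...} -> roughly 1.3e-3 N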
17
0
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
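Sketch of how this builder is reached from the user-facing API; the file name and toy DataFrame are assumptions for illustration:

import pandas as pd
from datasets import load_dataset

pd.DataFrame({"a": [1, 2], "b": ["x", "y"]}).to_pickle("train.pkl")
ds = load_dataset("pandas", data_files={"train": "train.pkl"})
print(ds["train"][0])  # {'a': 1, 'b': 'x'}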
721
import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=checkpoint_repo, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path to the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
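Example invocation of the script above (the script file name and the output path are placeholders; the repo id is the one quoted in the argument help string):

# python convert_roberta_prelayernorm_checkpoint.py \
#     --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#     --pytorch_dump_folder_path ./roberta-prelayernorm-converted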
17
0
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: {quantum_fourier_transform(3)}"
    )
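Because the circuit applies the QFT to |0…0⟩ and measures, the 10000 shots should spread roughly uniformly over all 2**n basis states. A quick sanity check (requires qiskit with the Aer simulator installed):

counts = quantum_fourier_transform(2)
assert sum(counts.values()) == 10000
# with overwhelming probability every one of the 2**2 basis states appears
print(counts)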
700
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __SCREAMING_SNAKE_CASE = { """configuration_instructblip""": [ """INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """InstructBlipConfig""", """InstructBlipQFormerConfig""", """InstructBlipVisionConfig""", ], """processing_instructblip""": ["""InstructBlipProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """InstructBlipQFormerModel""", """InstructBlipPreTrainedModel""", """InstructBlipForConditionalGeneration""", """InstructBlipVisionModel""", ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys __SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
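A short sketch of what the lazy module above buys: importing the package stays cheap, and the torch-backed classes load only on first attribute access (behavioral description, not a test from the source):

import transformers.models.instructblip as instructblip

# no model code has been imported yet; this attribute access triggers the real import
config_cls = instructblip.InstructBlipConfig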
17
0
import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = """▁""" __SCREAMING_SNAKE_CASE = { """vocab_file""": """vocab.json""", """spm_file""": """sentencepiece.bpe.model""", """tokenizer_config_file""": """tokenizer_config.json""", } __SCREAMING_SNAKE_CASE = { """vocab_file""": { """facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""", """facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""", }, """spm_file""": { """facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""", """facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""", }, """tokenizer_config_file""": { """facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""", """facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""", }, } __SCREAMING_SNAKE_CASE = { """facebook/m2m100_418M""": 1024, } # fmt: off __SCREAMING_SNAKE_CASE = { """m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""], """wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""] } class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = ["input_ids", "attention_mask"] a__ = [] a__ = [] def __init__( self : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple=None , __lowerCamelCase : int=None , __lowerCamelCase : Union[str, Any]="<s>" , __lowerCamelCase : int="</s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : int="<pad>" , __lowerCamelCase : List[str]="<unk>" , __lowerCamelCase : str="m2m100" , __lowerCamelCase : Optional[Dict[str, Any]] = None , __lowerCamelCase : int=8 , **__lowerCamelCase : List[Any] , ) -> None: A : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs A : Optional[Any] = language_codes A : Union[str, Any] = FAIRSEQ_LANGUAGE_CODES[language_codes] A : Optional[Any] = {lang_code: F"""__{lang_code}__""" for lang_code in fairseq_language_code} A : Union[str, Any] = kwargs.get("additional_special_tokens" , [] ) 
kwargs["additional_special_tokens"] += [ self.get_lang_token(__lowerCamelCase ) for lang_code in fairseq_language_code if self.get_lang_token(__lowerCamelCase ) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , language_codes=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__lowerCamelCase , **__lowerCamelCase , ) A : Dict = vocab_file A : List[str] = load_json(__lowerCamelCase ) A : Optional[Any] = {v: k for k, v in self.encoder.items()} A : Any = spm_file A : List[Any] = load_spm(__lowerCamelCase , self.sp_model_kwargs ) A : str = len(self.encoder ) A : List[str] = { self.get_lang_token(__lowerCamelCase ): self.encoder_size + i for i, lang_code in enumerate(__lowerCamelCase ) } A : List[str] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__lowerCamelCase )} A : Optional[Any] = {v: k for k, v in self.lang_token_to_id.items()} A : List[str] = src_lang if src_lang is not None else "en" A : Union[str, Any] = tgt_lang A : Union[str, Any] = self.get_lang_id(self._src_lang ) self.set_src_lang_special_tokens(self._src_lang ) A : str = num_madeup_words @property def SCREAMING_SNAKE_CASE__ ( self : str ) -> int: return len(self.encoder ) + len(self.lang_token_to_id ) @property def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: return self._src_lang @src_lang.setter def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : str ) -> None: A : str = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : str ) -> List[str]: return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : str ) -> Tuple: if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(__lowerCamelCase , self.encoder[self.unk_token] ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : int ) -> str: if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(__lowerCamelCase , self.unk_token ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Optional[Any] ) -> List[Any]: A : Any = [] A : Union[str, Any] = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__lowerCamelCase ) + token A : Optional[int] = [] else: current_sub_tokens.append(__lowerCamelCase ) out_string += self.sp_model.decode(__lowerCamelCase ) return out_string.strip() def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) A : str = [1] * len(self.prefix_tokens ) A : Dict = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__lowerCamelCase )) + suffix_ones return prefix_ones + ([0] * len(__lowerCamelCase )) + ([0] * len(__lowerCamelCase )) + suffix_ones def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> 
List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict: A : List[Any] = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Optional[int] ) -> Dict: A : Optional[Any] = self.__dict__.copy() A : List[Any] = None return state def __setstate__( self : str , __lowerCamelCase : Dict ) -> None: A : List[Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): A : List[str] = {} A : List[Any] = load_spm(self.spm_file , self.sp_model_kwargs ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]: A : List[str] = Path(__lowerCamelCase ) if not save_dir.is_dir(): raise OSError(F"""{save_directory} should be a directory""" ) A : int = save_dir / ( (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"] ) A : Optional[int] = save_dir / ( (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"] ) save_json(self.encoder , __lowerCamelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , __lowerCamelCase ) elif not os.path.isfile(self.spm_file ): with open(__lowerCamelCase , "wb" ) as fi: A : Dict = self.sp_model.serialized_model_proto() fi.write(__lowerCamelCase ) return (str(__lowerCamelCase ), str(__lowerCamelCase )) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : str = "en" , __lowerCamelCase : Optional[List[str]] = None , __lowerCamelCase : str = "ro" , **__lowerCamelCase : Tuple , ) -> BatchEncoding: A : Dict = src_lang A : int = tgt_lang self.set_src_lang_special_tokens(self.src_lang ) return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[str] , __lowerCamelCase : Optional[str] , **__lowerCamelCase : Dict ) -> Optional[int]: if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) A : Any = src_lang A : Optional[int] = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , **__lowerCamelCase ) A : int = self.get_lang_id(__lowerCamelCase ) A : Any = tgt_lang_id return inputs def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]: self.set_src_lang_special_tokens(self.src_lang ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]: self.set_tgt_lang_special_tokens(self.tgt_lang ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : str ) -> None: A : Optional[int] = self.get_lang_token(__lowerCamelCase ) A : List[str] = self.lang_token_to_id[lang_token] A : str = [self.cur_lang_id] A : Tuple = [self.eos_token_id] def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : str ) -> None: A : List[Any] = self.get_lang_token(__lowerCamelCase ) A : int = self.lang_token_to_id[lang_token] A : List[Any] = [self.cur_lang_id] A : Tuple = [self.eos_token_id] def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : str ) -> str: return self.lang_code_to_token[lang] def SCREAMING_SNAKE_CASE__ ( 
self : List[str] , __lowerCamelCase : str ) -> int:
        A : Optional[Any] = self.get_lang_token(__lowerCamelCase )
        return self.lang_token_to_id[A]


def load_spm( _lowerCamelCase , _sp_model_kwargs ):
    # Called as load_spm(self.spm_file, self.sp_model_kwargs) by the tokenizer above.
    A : Dict = sentencepiece.SentencePieceProcessor(**_sp_model_kwargs )
    A.Load(str(_lowerCamelCase ) )
    return A


def load_json( _lowerCamelCase ):
    with open(_lowerCamelCase , "r" ) as f:
        return json.load(f )


def save_json( _lowerCamelCase , _path ):
    # Called as save_json(data, path) by the tokenizer's save_vocabulary() above.
    with open(_path , "w" ) as f:
        json.dump(_lowerCamelCase , f , indent=2 )
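# A minimal round-trip sketch for the JSON helpers above; the file name is
# hypothetical and used only for illustration.
# save_json({"<s>": 0, "</s>": 1}, "demo_vocab.json")
# assert load_json("demo_vocab.json") == {"<s>": 0, "</s>": 1}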
701
import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Optional[int]: self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) ) for a, b in zip(__lowerCamelCase , __lowerCamelCase ): self.assertAlmostEqual(__lowerCamelCase , __lowerCamelCase , delta=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]: A : List[Any] = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(__lowerCamelCase ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: A : Union[str, Any] = None ops.enable_eager_execution_internal() A : Tuple = tf.config.list_physical_devices("CPU" ) if len(__lowerCamelCase ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) A : Dict = tf.config.list_logical_devices(device_type="CPU" ) A : List[str] = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): A : Optional[int] = GradientAccumulator() A : Tuple = tf.Variable([4.0, 3.0] ) A , A : List[Any] = create_optimizer(5e-5 , 10 , 5 ) A : List[str] = tf.Variable([0.0, 0.0] , trainable=__lowerCamelCase ) def accumulate_on_replica(__lowerCamelCase : Tuple ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(__lowerCamelCase : Any , __lowerCamelCase : Optional[int] ): with strategy.scope(): A : int = strategy.experimental_local_results(__lowerCamelCase ) local_variables[0].assign(__lowerCamelCase ) local_variables[1].assign(__lowerCamelCase ) strategy.run(__lowerCamelCase , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(__lowerCamelCase ) def _check_local_values(__lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ): A : Optional[int] = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , __lowerCamelCase , tol=1e-2 ) self.assertListAlmostEqual(values[1].value() , __lowerCamelCase , tol=1e-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
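# A usage sketch inferred from the API the tests above exercise (call the
# accumulator with a list of gradients, read .step and .gradients, then .reset()).
# `model`, `optimizer`, and `micro_batches` are placeholders, and averaging by
# .step is one possible convention, not the library's mandate.
# accumulator = GradientAccumulator()
# for grads in micro_batches:  # each element: a list of tf.Tensor gradients
#     accumulator(grads)
# averaged = [g / tf.cast(accumulator.step, g.dtype) for g in accumulator.gradients]
# optimizer.apply_gradients(zip(averaged, model.trainable_variables))
# accumulator.reset()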
17
0
import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) __SCREAMING_SNAKE_CASE = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation="""relu""") ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(32, (3, 3), activation="""relu""")) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=128, activation="""relu""")) classifier.add(layers.Dense(units=1, activation="""sigmoid""")) # Compiling the CNN classifier.compile( optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') __SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) __SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255) __SCREAMING_SNAKE_CASE = train_datagen.flow_from_directory( """dataset/training_set""", target_size=(64, 64), batch_size=32, class_mode="""binary""" ) __SCREAMING_SNAKE_CASE = test_datagen.flow_from_directory( """dataset/test_set""", target_size=(64, 64), batch_size=32, class_mode="""binary""" ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save("""cnn.h5""") # Part 3 - Making new predictions __SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.load_img( """dataset/single_prediction/image.png""", target_size=(64, 64) ) __SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.img_to_array(test_image) __SCREAMING_SNAKE_CASE = np.expand_dims(test_image, axis=0) __SCREAMING_SNAKE_CASE = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: __SCREAMING_SNAKE_CASE = """Normal""" if result[0][0] == 1: __SCREAMING_SNAKE_CASE = """Abnormality detected"""
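# Note: a sigmoid unit outputs a float in (0, 1), so the exact equality checks
# above only fire on saturated predictions. A thresholded variant (0.5 is an
# assumed cut-off) would be:
# prediction = "Abnormality detected" if result[0][0] >= 0.5 else "Normal"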
702
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __SCREAMING_SNAKE_CASE = { """configuration_altclip""": [ """ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AltCLIPConfig""", """AltCLIPTextConfig""", """AltCLIPVisionConfig""", ], """processing_altclip""": ["""AltCLIPProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """AltCLIPPreTrainedModel""", """AltCLIPModel""", """AltCLIPTextModel""", """AltCLIPVisionModel""", ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys __SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
17
0
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = """▁""" __SCREAMING_SNAKE_CASE = {"""vocab_file""": """spiece.model"""} __SCREAMING_SNAKE_CASE = { """vocab_file""": { """google/reformer-crime-and-punishment""": ( """https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model""" ) } } __SCREAMING_SNAKE_CASE = { """google/reformer-crime-and-punishment""": 524288, } class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = ["input_ids", "attention_mask"] def __init__( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple="</s>" , __lowerCamelCase : Optional[Any]="<unk>" , __lowerCamelCase : Dict=[] , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Optional[Any] , ) -> None: A : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , ) A : Union[str, Any] = vocab_file A : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowerCamelCase ) @property def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]: return self.sp_model.get_piece_size() def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict[str, int]: A : int = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[str] ) -> int: A : Union[str, Any] = self.__dict__.copy() A : int = None return state def __setstate__( self : Dict , __lowerCamelCase : List[str] ) -> Optional[Any]: A : Dict = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): A : Optional[int] = {} A : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : str ) -> List[str]: return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : int ) -> Dict: return self.sp_model.piece_to_id(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : List[Any] ) -> Dict: if index < self.sp_model.get_piece_size(): A : Optional[Any] = self.sp_model.IdToPiece(__lowerCamelCase ) return token def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Optional[Any] ) -> str: A : Dict = [] A : int = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__lowerCamelCase ) + token A : Tuple = [] else: current_sub_tokens.append(__lowerCamelCase ) out_string += self.sp_model.decode(__lowerCamelCase ) return out_string.strip() def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCamelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return A : Union[str, Any] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if 
os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCamelCase , "wb" ) as fi: A : Dict = self.sp_model.serialized_model_proto() fi.write(__lowerCamelCase ) return (out_vocab_file,)
703
import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging __SCREAMING_SNAKE_CASE = ( """https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py""" ) __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name def UpperCAmelCase ( ): A : Union[str, Any] = "https://pypi.org/pypi/diffusers/json" A : List[Any] = json.loads(request.urlopen(_lowerCamelCase ).read() )["releases"].keys() return sorted(_lowerCamelCase , key=lambda _lowerCamelCase : version.Version(_lowerCamelCase ) ) def UpperCAmelCase ( ): # This function has already been executed if HF_MODULES_CACHE already is in the Python path. if HF_MODULES_CACHE in sys.path: return sys.path.append(_lowerCamelCase ) os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) A : List[Any] = Path(_lowerCamelCase ) / "__init__.py" if not init_path.exists(): init_path.touch() def UpperCAmelCase ( _lowerCamelCase ): init_hf_modules() A : Tuple = Path(_lowerCamelCase ) / name # If the parent module does not exist yet, recursively create it. if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) A : Optional[int] = dynamic_module_path / "__init__.py" if not init_path.exists(): init_path.touch() def UpperCAmelCase ( _lowerCamelCase ): with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f: A : Union[str, Any] = f.read() # Imports of the form `import .xxx` A : Union[str, Any] = re.findall("^\s*import\s+\.(\S+)\s*$" , _lowerCamelCase , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import" , _lowerCamelCase , flags=re.MULTILINE ) # Unique-ify return list(set(_lowerCamelCase ) ) def UpperCAmelCase ( _lowerCamelCase ): A : Optional[int] = False A : Tuple = [module_file] A : Optional[int] = [] # Let's recurse through all relative imports while not no_change: A : Optional[Any] = [] for f in files_to_check: new_imports.extend(get_relative_imports(_lowerCamelCase ) ) A : Optional[Any] = Path(_lowerCamelCase ).parent A : List[str] = [str(module_path / m ) for m in new_imports] A : Optional[Any] = [f for f in new_import_files if f not in all_relative_imports] A : Union[str, Any] = [f"""{f}.py""" for f in new_import_files] A : Tuple = len(_lowerCamelCase ) == 0 all_relative_imports.extend(_lowerCamelCase ) return all_relative_imports def UpperCAmelCase ( _lowerCamelCase ): with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f: A : Dict = f.read() # Imports of the form `import xxx` A : List[str] = re.findall("^\s*import\s+(\S+)\s*$" , _lowerCamelCase , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall("^\s*from\s+(\S+)\s+import" , _lowerCamelCase , flags=re.MULTILINE ) # Only keep the top-level module A : Optional[int] = [imp.split("." )[0] for imp in imports if not imp.startswith("." 
)] # Unique-ify and test we got them all A : Any = list(set(_lowerCamelCase ) ) A : Tuple = [] for imp in imports: try: importlib.import_module(_lowerCamelCase ) except ImportError: missing_packages.append(_lowerCamelCase ) if len(_lowerCamelCase ) > 0: raise ImportError( "This modeling file requires the following packages that were not found in your environment: " f"""{", ".join(_lowerCamelCase )}. Run `pip install {" ".join(_lowerCamelCase )}`""" ) return get_relative_imports(_lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : int = module_path.replace(os.path.sep , "." ) A : Optional[Any] = importlib.import_module(_lowerCamelCase ) if class_name is None: return find_pipeline_class(_lowerCamelCase ) return getattr(_lowerCamelCase , _lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase ): from ..pipelines import DiffusionPipeline A : int = dict(inspect.getmembers(_lowerCamelCase , inspect.isclass ) ) A : Union[str, Any] = None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , _lowerCamelCase ) and cls.__module__.split("." )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:""" f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in""" f""" {loaded_module}.""" ) A : Any = cls return pipeline_class def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , ): A : List[Any] = str(_lowerCamelCase ) A : Any = os.path.join(_lowerCamelCase , _lowerCamelCase ) if os.path.isfile(_lowerCamelCase ): A : Union[str, Any] = module_file_or_url A : Any = "local" elif pretrained_model_name_or_path.count("/" ) == 0: A : Optional[Any] = get_diffusers_versions() # cut ".dev0" A : Union[str, Any] = "v" + ".".join(__version__.split("." )[:3] ) # retrieve github version that matches if revision is None: A : List[Any] = latest_version if latest_version[1:] in available_versions else "main" logger.info(f"""Defaulting to latest_version: {revision}.""" ) elif revision in available_versions: A : Optional[Any] = f"""v{revision}""" elif revision == "main": A : Dict = revision else: raise ValueError( f"""`custom_revision`: {revision} does not exist. 
Please make sure to choose one of""" f""" {", ".join(available_versions + ["main"] )}.""" ) # community pipeline on GitHub A : Dict = COMMUNITY_PIPELINES_URL.format(revision=_lowerCamelCase , pipeline=_lowerCamelCase ) try: A : Optional[int] = cached_download( _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , ) A : Optional[Any] = "git" A : Any = pretrained_model_name_or_path + ".py" except EnvironmentError: logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" ) raise else: try: # Load from URL or cache if already cached A : Any = hf_hub_download( _lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , ) A : Optional[Any] = os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) ) except EnvironmentError: logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" ) raise # Check we have all the requirements in our environment A : List[str] = check_imports(_lowerCamelCase ) # Now we move the module inside our cached dynamic modules. A : List[str] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(_lowerCamelCase ) A : Optional[int] = Path(_lowerCamelCase ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(_lowerCamelCase , submodule_path / module_file ) for module_needed in modules_needed: A : int = f"""{module_needed}.py""" shutil.copy(os.path.join(_lowerCamelCase , _lowerCamelCase ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(_lowerCamelCase , _lowerCamelCase ): A : Optional[Any] = use_auth_token elif use_auth_token is True: A : Dict = HfFolder.get_token() else: A : Tuple = None A : List[str] = model_info(_lowerCamelCase , revision=_lowerCamelCase , token=_lowerCamelCase ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. 
A : str = submodule_path / commit_hash A : List[str] = full_submodule + os.path.sep + commit_hash create_dynamic_module(_lowerCamelCase ) if not (submodule_path / module_file).exists(): shutil.copy(_lowerCamelCase , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( _lowerCamelCase , f"""{module_needed}.py""" , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , ) return os.path.join(_lowerCamelCase , _lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , **_lowerCamelCase , ): A : int = get_cached_module_file( _lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , ) return get_class_in_module(_lowerCamelCase , final_module.replace(".py" , "" ) )
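# A sketch of what the relative-import scanner above extracts. The functions in
# this snippet are all (re)defined under the same name, so the upstream names
# (get_relative_imports, check_imports, ...) are assumed here.
# with open("my_pipeline.py", "w") as f:
#     f.write("import torch\nfrom .layers import Block\n")
# get_relative_imports("my_pipeline.py")  # -> ["layers"]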
17
0
from math import sqrt


def solution(limit: int = 100_0000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(F"""{solution() = }""")
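# Worked check: per the Project Euler 86 statement (figures quoted, not
# recomputed here), 1975 cuboids fit when M = 99 and 2060 when M = 100, so the
# count first exceeds 2000 at M = 100.
assert solution(2000) == 100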
704
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed __SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""): from run_translation import main # noqa set_seed(42) __SCREAMING_SNAKE_CASE = """sshleifer/student_marian_en_ro_6_1""" __SCREAMING_SNAKE_CASE = """sshleifer/tiny-mbart""" @require_torch class lowerCamelCase_ ( _A ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : str=None , __lowerCamelCase : List[str]=True , __lowerCamelCase : str=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Optional[int]=True , ) -> Dict: A : str = self.run_trainer( eval_steps=1 , max_len=12 , model_name=__lowerCamelCase , num_train_epochs=1 , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , predict_with_generate=__lowerCamelCase , do_train=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , ) A : Dict = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history if not do_eval: return A : List[Any] = [log for log in logs if "eval_loss" in log.keys()] A : Any = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats A : List[str] = eval_metrics[-1] assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase ) assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: self.run_seqaseq_quick() @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: self.run_seqaseq_quick(distributed=__lowerCamelCase ) @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str: self.run_seqaseq_quick(distributed=__lowerCamelCase ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]: self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple" ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> str: self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple --fp16" ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=__lowerCamelCase ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: self.run_seqaseq_quick( distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , 
predict_with_generate=__lowerCamelCase ) @require_apex @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict: # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" ) @parameterized.expand(["base", "low", "high", "mixed"] ) @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : List[str] ) -> Tuple: # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout A : Dict = { # test with the default log_level - should be info and thus log info once "base": {"extra_args_str": "", "n_matches": 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1}, # test with high log_level and log_level_replica - should be quiet on all processes "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0}, } A : List[str] = experiments[experiment_id] A : Union[str, Any] = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False} A : Union[str, Any] = "Running training" with CaptureStderr() as cl: self.run_seqaseq_quick(**__lowerCamelCase , extra_args_str=data["extra_args_str"] ) A : Dict = len(re.findall(__lowerCamelCase , cl.err ) ) self.assertEqual(__lowerCamelCase , data["n_matches"] ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: A : int = self.run_trainer( eval_steps=2 , max_len=1_28 , model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=10 , distributed=__lowerCamelCase , ) # Check metrics A : str = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history A : Dict = [log for log in logs if "eval_loss" in log.keys()] A : Dict = eval_metrics[0] A : int = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase ) # test if do_predict saves generations and metrics A : Optional[Any] = os.listdir(__lowerCamelCase ) A : Any = {os.path.basename(__lowerCamelCase ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]: from transformers.training_args import OptimizerNames def train_and_return_metrics(__lowerCamelCase : str ) -> Tuple[int, float]: A : Optional[int] = "--skip_memory_metrics 0" A : str = self.run_trainer( max_len=1_28 , 
model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=1 , optim=__lowerCamelCase , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , n_gpus_to_use=1 , ) # Check metrics A : Union[str, Any] = TrainerState.load_from_json(Path(__lowerCamelCase , "trainer_state.json" ) ).log_history A : str = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 ) A : List[Any] = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 ) A : int = logs[0]["train_loss"] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss A , A , A : int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) A , A , A : Optional[int] = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) A : Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb A : Dict = gpu_peak_mem_orig + gpu_alloc_mem_orig A : Dict = gpu_peak_mem_bnb + gpu_alloc_mem_bnb A : int = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings A : Tuple = 1_20 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( __lowerCamelCase , __lowerCamelCase , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got" F""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and""" F""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , ) self.assertGreater( __lowerCamelCase , __lowerCamelCase , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got" F""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and""" F""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , ) self.assertEqual( __lowerCamelCase , __lowerCamelCase , F"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" ) def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : float = 3e-3 , __lowerCamelCase : str = "adafactor" , __lowerCamelCase : bool = False , __lowerCamelCase : str = None , __lowerCamelCase : int = 0 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : int = None , ) -> List[str]: A : Optional[int] = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro" A : Optional[int] = self.get_auto_remove_tmp_dir() A : int = F""" --model_name_or_path {model_name} --train_file {data_dir}/train.json 
--validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(__lowerCamelCase )}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(__lowerCamelCase )}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        A : Optional[Any] = F"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(__lowerCamelCase )}
        """.split()

        A : Optional[Any] = "\n --do_predict\n ".split()

        A : Optional[int] = []
        if do_train:
            args += args_train

        if do_eval:
            args += args_eval

        if do_predict:
            args += args_predict

        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += F"""--optim {optim}""".split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                A : Dict = get_gpu_count()
            A : Any = get_torch_dist_unique_port()
            A : Optional[Any] = F"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            A : Any = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(__lowerCamelCase , env=self.get_env() )
        else:
            A : List[Any] = ["run_translation.py"] + args
            with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ):
                main()

        return output_dir
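# Note: run_trainer() above assembles an argv for run_translation.py; the train,
# eval, and predict flag groups are concatenated on demand, "--adafactor" is kept
# as the legacy spelling of "--optim adafactor", and distributed runs re-launch
# through `python -m torch.distributed.run` on a per-worker master port.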
17
0
import os


def solution():
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
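# A sketch with hypothetical data: for a two-line num.txt the helper sums the
# integers and keeps the first ten digits of the total.
# with open("num.txt", "w") as f:
#     f.write("12345678901234567890\n98765432109876543210\n")
# solution()  # -> "1111111110" (the sum is 111111111011111111100)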
705
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # Keep the best sum so far, extend it with num, or restart from num.
        ans = max(ans, ans + num, num)

    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
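# Worked examples: a subsequence may skip elements, so the best pick below is
# every positive number (1 + 4 + 2 + 1 + 4 = 12); with an all-negative input the
# best subsequence is the single largest element.
assert max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 12
assert max_subsequence_sum([-3, -1, -2]) == -1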
17
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __SCREAMING_SNAKE_CASE = { """configuration_groupvit""": [ """GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GroupViTConfig""", """GroupViTOnnxConfig""", """GroupViTTextConfig""", """GroupViTVisionConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """GroupViTModel""", """GroupViTPreTrainedModel""", """GroupViTTextModel""", """GroupViTVisionModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFGroupViTModel""", """TFGroupViTPreTrainedModel""", """TFGroupViTTextModel""", """TFGroupViTVisionModel""", ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys __SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
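# Note: with _LazyModule, importing this package is cheap; the heavy torch/TF
# submodules are only imported on first attribute access, e.g.
# from transformers.models.groupvit import GroupViTModel  # triggers the real import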
706
from math import sqrt


def solution(limit: int = 100_0000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(F"""{solution() = }""")
17
0
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(F"""{solution() = }""")
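# Worked check (the Project Euler 6 example): for n = 10 the square of the sum is
# 55**2 = 3025 and the sum of the squares is 385, a difference of 2640.
assert solution(10) == 2640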
707
import os # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_doctest_list.py __SCREAMING_SNAKE_CASE = """.""" if __name__ == "__main__": __SCREAMING_SNAKE_CASE = os.path.join(REPO_PATH, """utils/documentation_tests.txt""") __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] with open(doctest_file_path) as fp: for line in fp: __SCREAMING_SNAKE_CASE = line.strip() __SCREAMING_SNAKE_CASE = os.path.join(REPO_PATH, line) if not (os.path.isfile(path) or os.path.isdir(path)): non_existent_paths.append(line) all_paths.append(path) if len(non_existent_paths) > 0: __SCREAMING_SNAKE_CASE = """\n""".join(non_existent_paths) raise ValueError(F"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""") if all_paths != sorted(all_paths): raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""")
17
0
__SCREAMING_SNAKE_CASE = """0.18.2""" from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, KandinskyVaaPriorEmbaEmbPipeline, 
KandinskyVaaPriorPipeline, LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( FlaxDDIMScheduler, FlaxDDPMScheduler, 
FlaxDPMSolverMultistepScheduler, FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
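# Note: the try/except OptionalDependencyNotAvailable blocks above keep every
# public name importable even when an extra (torch, flax, note_seq, ...) is
# missing; the dummy_* fallbacks raise an informative error only when the object
# is actually used.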
708
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class lowerCamelCase_ : '''simple docstring''' def __init__( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Tuple=13 , __lowerCamelCase : List[str]=30 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : Any=3 , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : int=32 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Any=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=2 , ) -> str: A : List[Any] = parent A : Optional[int] = batch_size A : Any = image_size A : Optional[Any] = patch_size A : Optional[Any] = num_channels A : Tuple = is_training A : Optional[Any] = use_labels A : Union[str, Any] = hidden_size A : Tuple = num_hidden_layers A : Union[str, Any] = num_attention_heads A : Union[str, Any] = intermediate_size A : Any = hidden_act A : Tuple = hidden_dropout_prob A : Dict = attention_probs_dropout_prob A : Any = type_sequence_label_size A : Tuple = initializer_range A : List[Any] = scope A : Optional[int] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) A : List[str] = (image_size // patch_size) ** 2 A : List[str] = num_patches + 2 def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int: A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : List[Any] = None if self.use_labels: A : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Dict = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str] ) -> int: A : Optional[int] 
= DeiTModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : List[Any] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Any ) -> Any: A : List[Any] = DeiTForMaskedImageModeling(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A : List[str] = 1 A : Optional[int] = DeiTForMaskedImageModeling(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : int ) -> Dict: A : str = self.type_sequence_label_size A : List[str] = DeiTForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A : Any = 1 A : str = DeiTForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: A : Dict = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ) : Tuple = config_and_inputs A : int = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) a__ = ( { "feature-extraction": DeiTModel, "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]: A : str = DeiTModelTester(self ) A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict: self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int: pass def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: A , A : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Dict = model_class(__lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]: A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Union[str, Any] 
= model_class(__lowerCamelCase ) A : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Any = [*signature.parameters.keys()] A : Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any: A : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any]=False ) -> str: A : Union[str, Any] = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: if not self.model_tester.is_training: return A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() A : Dict = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(__lowerCamelCase ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue A : Union[str, Any] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Dict = model(**__lowerCamelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return A : Tuple = False A : Any = True for model_class in self.all_model_classes: if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue A : List[str] = model_class(__lowerCamelCase ) model.gradient_checkpointing_enable() model.to(__lowerCamelCase ) model.train() A : Dict = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Tuple = model(**__lowerCamelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() A : int = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(__lowerCamelCase ), *get_values(__lowerCamelCase ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ): A : Tuple = problem_type["title"] A : Optional[Any] = 
problem_type["num_labels"] A : List[str] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if problem_type["num_labels"] > 1: A : List[str] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) A : int = inputs["labels"].to(problem_type["dtype"] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=__lowerCamelCase ) as warning_list: A : Optional[Any] = model(**__lowerCamelCase ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Optional[Any] = DeiTModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: A : Optional[int] = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to( __lowerCamelCase ) A : List[Any] = self.default_image_processor A : List[Any] = prepare_img() A : Optional[Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): A : List[str] = model(**__lowerCamelCase ) # verify the logits A : str = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : List[str] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: A : str = DeiTModel.from_pretrained( "facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" ) A : Dict = self.default_image_processor A : Optional[int] = prepare_img() A : Union[str, Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ) A : Union[str, Any] = inputs.pixel_values.to(__lowerCamelCase ) # forward pass to make sure inference works in fp16 with torch.no_grad(): A : List[str] = model(__lowerCamelCase )
17
0
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common, ) @flax.struct.dataclass class lowerCamelCase_ : '''simple docstring''' a__ = 42 # setable values a__ = 42 a__ = 42 a__ = None @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , __lowerCamelCase : CommonSchedulerState , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray ) -> Optional[int]: return cls(common=__lowerCamelCase , init_noise_sigma=__lowerCamelCase , timesteps=__lowerCamelCase ) @dataclass class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = 42 class lowerCamelCase_ ( _A ,_A ): '''simple docstring''' a__ = [e.name for e in FlaxKarrasDiffusionSchedulers] a__ = 42 @property def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: return True @register_to_config def __init__( self : Tuple , __lowerCamelCase : int = 10_00 , __lowerCamelCase : float = 0.0001 , __lowerCamelCase : float = 0.02 , __lowerCamelCase : str = "linear" , __lowerCamelCase : Optional[jnp.ndarray] = None , __lowerCamelCase : str = "fixed_small" , __lowerCamelCase : bool = True , __lowerCamelCase : str = "epsilon" , __lowerCamelCase : jnp.dtype = jnp.floataa , ) -> List[str]: A : str = dtype def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : Optional[CommonSchedulerState] = None ) -> DDPMSchedulerState: if common is None: A : int = CommonSchedulerState.create(self ) # standard deviation of the initial noise distribution A : Tuple = jnp.array(1.0 , dtype=self.dtype ) A : Union[str, Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1] return DDPMSchedulerState.create( common=__lowerCamelCase , init_noise_sigma=__lowerCamelCase , timesteps=__lowerCamelCase , ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : DDPMSchedulerState , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : Optional[int] = None ) -> jnp.ndarray: return sample def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : DDPMSchedulerState , __lowerCamelCase : int , __lowerCamelCase : Tuple = () ) -> DDPMSchedulerState: A : List[str] = self.config.num_train_timesteps // num_inference_steps # creates integer timesteps by multiplying by ratio # rounding to avoid issues when num_inference_step is power of 3 A : int = (jnp.arange(0 , __lowerCamelCase ) * step_ratio).round()[::-1] return state.replace( num_inference_steps=__lowerCamelCase , timesteps=__lowerCamelCase , ) def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : DDPMSchedulerState , __lowerCamelCase : Any , __lowerCamelCase : List[str]=None , __lowerCamelCase : int=None ) -> Any: A : str = state.common.alphas_cumprod[t] A : Union[str, Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample A : str = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] if variance_type is None: A : Tuple = self.config.variance_type # hacks - were probably added for training stability if variance_type == 
"fixed_small": A : str = jnp.clip(__lowerCamelCase , a_min=1e-20 ) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": A : str = jnp.log(jnp.clip(__lowerCamelCase , a_min=1e-20 ) ) elif variance_type == "fixed_large": A : Union[str, Any] = state.common.betas[t] elif variance_type == "fixed_large_log": # Glide max_log A : str = jnp.log(state.common.betas[t] ) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": A : Optional[Any] = variance A : str = state.common.betas[t] A : Optional[int] = (predicted_variance + 1) / 2 A : int = frac * max_log + (1 - frac) * min_log return variance def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : DDPMSchedulerState , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : int , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : Optional[jax.random.KeyArray] = None , __lowerCamelCase : bool = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]: A : Union[str, Any] = timestep if key is None: A : Any = jax.random.PRNGKey(0 ) if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]: A : Any = jnp.split(__lowerCamelCase , sample.shape[1] , axis=1 ) else: A : Tuple = None # 1. compute alphas, betas A : Dict = state.common.alphas_cumprod[t] A : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) A : Any = 1 - alpha_prod_t A : str = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": A : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": A : Union[str, Any] = model_output elif self.config.prediction_type == "v_prediction": A : Dict = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """ " for the FlaxDDPMScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: A : Dict = jnp.clip(__lowerCamelCase , -1 , 1 ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A : Optional[int] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t A : Dict = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A : List[str] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise def random_variance(): A : str = jax.random.split(__lowerCamelCase , num=1 ) A : Any = jax.random.normal(__lowerCamelCase , shape=model_output.shape , dtype=self.dtype ) return (self._get_variance(__lowerCamelCase , __lowerCamelCase , predicted_variance=__lowerCamelCase ) ** 0.5) * noise A : List[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) ) A : List[Any] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample, state) return FlaxDDPMSchedulerOutput(prev_sample=__lowerCamelCase , state=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : DDPMSchedulerState , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , ) -> jnp.ndarray: return add_noise_common(state.common , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : DDPMSchedulerState , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , ) -> jnp.ndarray: return get_velocity_common(state.common , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def __len__( self : str ) -> str: return self.config.num_train_timesteps
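# Hedged usage sketch of the scheduler defined above (FlaxDDPMScheduler in upstream
# diffusers, imported here under that name). `dummy_model` is a stand-in for a trained
# Flax UNet and is an assumption for illustration; the state-threading pattern mirrors
# the create_state / set_timesteps / step methods implemented in this file.
import jax
import jax.numpy as jnp
from diffusers import FlaxDDPMScheduler

scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
state = scheduler.create_state()
state = scheduler.set_timesteps(state, 50)

def dummy_model(sample, t):
    return jnp.zeros_like(sample)  # placeholder for the predicted noise (epsilon)

key = jax.random.PRNGKey(0)
sample = jax.random.normal(key, (1, 3, 32, 32)) * state.init_noise_sigma
for t in state.timesteps:
    key, step_key = jax.random.split(key)
    out = scheduler.step(state, dummy_model(sample, t), t, sample, key=step_key)
    sample, state = out.prev_sample, out.state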
709
from sklearn.metrics import recall_score import datasets __SCREAMING_SNAKE_CASE = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ __SCREAMING_SNAKE_CASE = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`. - **zero_division** (`'warn'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`. - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ __SCREAMING_SNAKE_CASE = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("int32" ) ), "references": datasets.Sequence(datasets.Value("int32" ) ), } if self.config_name == "multilabel" else { "predictions": datasets.Value("int32" ), "references": datasets.Value("int32" ), } ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , ) def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Tuple="binary" , __lowerCamelCase : Tuple=None , __lowerCamelCase : Tuple="warn" , ) -> Optional[Any]: A : str = recall_score( __lowerCamelCase , __lowerCamelCase , labels=__lowerCamelCase , pos_label=__lowerCamelCase , average=__lowerCamelCase , sample_weight=__lowerCamelCase , zero_division=__lowerCamelCase , ) return {"recall": float(__lowerCamelCase ) if score.size == 1 else score}
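# Hedged sketch of the "multilabel" configuration branched on in the Features above:
# references and predictions become sequences of per-label ints, and sklearn's
# micro-averaged recall counts label hits globally (2 of the 3 positives below).
import datasets

recall = datasets.load_metric("recall", "multilabel")
result = recall.compute(
    references=[[0, 1], [1, 1]],
    predictions=[[0, 1], [1, 0]],
    average="micro",
)
print(result)  # {'recall': 0.666...}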
17
0
import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs (`Dict[str, Any]`, *optional*): Additional stopping criteria specific kwargs. Return: `bool`. `False` indicates we should continue, `True` indicates we should stop. """ class lowerCamelCase_ ( _A ): '''simple docstring''' @add_start_docstrings(__lowerCamelCase ) def __call__( self : Optional[int] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Optional[Any] ) -> bool: raise NotImplementedError("StoppingCriteria needs to be subclassed" ) class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] = None ) -> List[Any]: A : str = max_length A : Optional[int] = max_position_embeddings @add_start_docstrings(__lowerCamelCase ) def __call__( self : List[str] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Any ) -> bool: A : List[Any] = input_ids.shape[-1] A : Any = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( "This is a friendly reminder - the current text generation call will exceed the model's predefined " F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """ "exceptions, performance degradation, or nothing at all." ) return is_done class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : int ) -> List[Any]: warnings.warn( "The class `MaxNewTokensCriteria` is deprecated. " F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """ "with `max_length = start_length + max_new_tokens` instead." 
, __lowerCamelCase , ) A : str = start_length A : Optional[Any] = max_new_tokens A : Dict = start_length + max_new_tokens @add_start_docstrings(__lowerCamelCase ) def __call__( self : int , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool: return input_ids.shape[-1] >= self.max_length class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : Optional[int] , __lowerCamelCase : float , __lowerCamelCase : Optional[float] = None ) -> List[Any]: A : str = max_time A : Dict = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(__lowerCamelCase ) def __call__( self : Any , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool: return time.time() - self.initial_timestamp > self.max_time class lowerCamelCase_ ( _A ): '''simple docstring''' @add_start_docstrings(__lowerCamelCase ) def __call__( self : Union[str, Any] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : int ) -> bool: return any(criteria(__lowerCamelCase , __lowerCamelCase ) for criteria in self ) @property def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]: for stopping_criterium in self: if isinstance(__lowerCamelCase , __lowerCamelCase ): return stopping_criterium.max_length elif isinstance(__lowerCamelCase , __lowerCamelCase ): return stopping_criterium.max_length return None def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : Optional[int] = stopping_criteria.max_length A : Any = deepcopy(_lowerCamelCase ) if stopping_max_length is not None and stopping_max_length != max_length: warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , _lowerCamelCase ) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=_lowerCamelCase ) ) return new_stopping_criteria
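# Hedged usage sketch of the classes above, using the upstream names
# (MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList) rather than the
# placeholder class names in this file: the list-level __call__ returns True as
# soon as any member criterion fires.
import torch
from transformers import MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList

criteria = StoppingCriteriaList(
    [
        MaxLengthCriteria(max_length=20),
        MaxTimeCriteria(max_time=60.0),  # seconds since construction
    ]
)
input_ids = torch.ones((1, 20), dtype=torch.long)
scores = torch.zeros((1, 100))
print(criteria(input_ids, scores))  # True: max_length is already reached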
710
from collections import deque from .hash_table import HashTable class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : Union[str, Any] , *__lowerCamelCase : Dict , **__lowerCamelCase : int ) -> Optional[int]: super().__init__(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Union[str, Any]: A : Optional[Any] = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(__lowerCamelCase ) A : Dict = self.values[key] def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: return ( sum(self.charge_factor - len(__lowerCamelCase ) for slot in self.values ) / self.size_table * self.charge_factor ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : List[Any]=None ) -> Optional[int]: if not ( len(self.values[key] ) == self.charge_factor and self.values.count(__lowerCamelCase ) == 0 ): return key return super()._collision_resolution(__lowerCamelCase , __lowerCamelCase )
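# Hedged standalone sketch of the separate-chaining idea the class above builds on
# (its HashTable base class is imported from another module and not shown here):
# each slot keeps a deque of values and new insertions go on the left.
from collections import deque

SIZE = 8
slots = [deque() for _ in range(SIZE)]

def put(key, value):
    slots[hash(key) % SIZE].appendleft(value)

put("a", 1)
put("a", 2)
print(list(slots[hash("a") % SIZE]))  # [2, 1] - most recent value first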
17
0
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' a__ = JukeboxTokenizer a__ = { "artist": "Zac Brown Band", "genres": "Country", "lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ", } @require_torch def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict: import torch A : Optional[Any] = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" ) A : Tuple = tokenizer(**self.metas )["input_ids"] # fmt: off A : List[Any] = [ torch.tensor([[ 0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 
76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def SCREAMING_SNAKE_CASE__ ( self : int ) -> Tuple: import torch A : int = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" ) A : str = tokenizer(**self.metas )["input_ids"] # fmt: off A : List[str] = [ torch.tensor([[ 0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
711
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class lowerCamelCase_ : '''simple docstring''' @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str: return self.get_dummy_input() @property def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : int=False , __lowerCamelCase : int=False , __lowerCamelCase : Optional[int]=False , ) -> Dict: A : Optional[Any] = 4 A : List[str] = 32 A : Any = (32, 32) A : str = torch.manual_seed(0 ) A : int = torch.device(__lowerCamelCase ) A : List[str] = (batch_size, num_channels) + sizes A : Dict = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase ) A : int = {"hidden_states": hidden_states} if include_temb: A : Any = 1_28 A : List[str] = randn_tensor((batch_size, temb_channels) , generator=__lowerCamelCase , device=__lowerCamelCase ) if include_res_hidden_states_tuple: A : str = torch.manual_seed(1 ) A : Tuple = (randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase ),) if include_encoder_hidden_states: A : Dict = floats_tensor((batch_size, 32, 32) ).to(__lowerCamelCase ) if include_skip_sample: A : Optional[int] = randn_tensor(((batch_size, 3) + sizes) , generator=__lowerCamelCase , device=__lowerCamelCase ) return dummy_input def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]: A : Dict = { "in_channels": 32, "out_channels": 32, "temb_channels": 1_28, } if self.block_type == "up": A : Dict = 32 if self.block_type == "mid": init_dict.pop("out_channels" ) A : str = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Optional[int] ) -> Union[str, Any]: A , A : str = self.prepare_init_args_and_inputs_for_common() A : List[Any] = self.block_class(**__lowerCamelCase ) unet_block.to(__lowerCamelCase ) unet_block.eval() with torch.no_grad(): A : int = unet_block(**__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): A : Union[str, Any] = output[0] self.assertEqual(output.shape , self.output_shape ) A : Any = output[0, -1, -3:, -3:] A : Union[str, Any] = torch.tensor(__lowerCamelCase ).to(__lowerCamelCase ) assert torch_all_close(output_slice.flatten() , __lowerCamelCase , atol=5e-3 ) @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: A , A : Tuple = self.prepare_init_args_and_inputs_for_common() A : str = self.block_class(**__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : Optional[int] = model(**__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): A : Optional[Any] = output[0] A : List[str] = torch.device(__lowerCamelCase ) A : List[str] = randn_tensor(output.shape , device=__lowerCamelCase ) A : Dict = torch.nn.functional.mse_loss(__lowerCamelCase , __lowerCamelCase ) loss.backward()
17
0
import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def UpperCAmelCase ( _lowerCamelCase ): monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() ) @pytest.fixture def UpperCAmelCase ( _lowerCamelCase ): class lowerCamelCase_ : '''simple docstring''' def __init__( self : List[Any] , __lowerCamelCase : Dict ) -> Optional[Any]: A : Optional[int] = metric_id class lowerCamelCase_ : '''simple docstring''' a__ = [MetricMock(_A ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]] def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Tuple: return self._metrics monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() ) @pytest.mark.parametrize( "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if "tmp_path" in args: A : Optional[Any] = tuple(arg if arg != "tmp_path" else tmp_path for arg in args ) with pytest.warns(_lowerCamelCase , match="https://huggingface.co/docs/evaluate" ): func(*_lowerCamelCase )
712
import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs (`Dict[str, Any]`, *optional*): Additional stopping criteria specific kwargs. Return: `bool`. `False` indicates we should continue, `True` indicates we should stop. """ class lowerCamelCase_ ( _A ): '''simple docstring''' @add_start_docstrings(__lowerCamelCase ) def __call__( self : Optional[int] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Optional[Any] ) -> bool: raise NotImplementedError("StoppingCriteria needs to be subclassed" ) class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] = None ) -> List[Any]: A : str = max_length A : Optional[int] = max_position_embeddings @add_start_docstrings(__lowerCamelCase ) def __call__( self : List[str] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Any ) -> bool: A : List[Any] = input_ids.shape[-1] A : Any = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( "This is a friendly reminder - the current text generation call will exceed the model's predefined " F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """ "exceptions, performance degradation, or nothing at all." ) return is_done class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : int ) -> List[Any]: warnings.warn( "The class `MaxNewTokensCriteria` is deprecated. " F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """ "with `max_length = start_length + max_new_tokens` instead." 
, __lowerCamelCase , ) A : str = start_length A : Optional[Any] = max_new_tokens A : Dict = start_length + max_new_tokens @add_start_docstrings(__lowerCamelCase ) def __call__( self : int , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool: return input_ids.shape[-1] >= self.max_length class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : Optional[int] , __lowerCamelCase : float , __lowerCamelCase : Optional[float] = None ) -> List[Any]: A : str = max_time A : Dict = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(__lowerCamelCase ) def __call__( self : Any , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool: return time.time() - self.initial_timestamp > self.max_time class lowerCamelCase_ ( _A ): '''simple docstring''' @add_start_docstrings(__lowerCamelCase ) def __call__( self : Union[str, Any] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : int ) -> bool: return any(criteria(__lowerCamelCase , __lowerCamelCase ) for criteria in self ) @property def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]: for stopping_criterium in self: if isinstance(__lowerCamelCase , __lowerCamelCase ): return stopping_criterium.max_length elif isinstance(__lowerCamelCase , __lowerCamelCase ): return stopping_criterium.max_length return None def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : Optional[int] = stopping_criteria.max_length A : Any = deepcopy(_lowerCamelCase ) if stopping_max_length is not None and stopping_max_length != max_length: warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , _lowerCamelCase ) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=_lowerCamelCase ) ) return new_stopping_criteria
17
0
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = { """microsoft/beit-base-patch16-224-pt22k""": ( """https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json""" ), # See all BEiT models at https://huggingface.co/models?filter=beit } class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = "beit" def __init__( self : Dict , __lowerCamelCase : List[str]=81_92 , __lowerCamelCase : List[Any]=7_68 , __lowerCamelCase : int=12 , __lowerCamelCase : Tuple=12 , __lowerCamelCase : Optional[Any]=30_72 , __lowerCamelCase : Optional[Any]="gelu" , __lowerCamelCase : Tuple=0.0 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : Optional[int]=0.02 , __lowerCamelCase : int=1e-12 , __lowerCamelCase : List[str]=2_24 , __lowerCamelCase : str=16 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : Dict=False , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Any=False , __lowerCamelCase : str=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : int=True , __lowerCamelCase : Any=[3, 5, 7, 11] , __lowerCamelCase : Optional[int]=[1, 2, 3, 6] , __lowerCamelCase : Dict=True , __lowerCamelCase : Dict=0.4 , __lowerCamelCase : List[str]=2_56 , __lowerCamelCase : Optional[Any]=1 , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Any=2_55 , **__lowerCamelCase : Union[str, Any] , ) -> Optional[Any]: super().__init__(**__lowerCamelCase ) A : Optional[Any] = vocab_size A : Tuple = hidden_size A : List[Any] = num_hidden_layers A : List[Any] = num_attention_heads A : List[str] = intermediate_size A : Tuple = hidden_act A : Tuple = hidden_dropout_prob A : str = attention_probs_dropout_prob A : Optional[int] = initializer_range A : Any = layer_norm_eps A : Union[str, Any] = image_size A : Union[str, Any] = patch_size A : int = num_channels A : str = use_mask_token A : Any = use_absolute_position_embeddings A : str = use_relative_position_bias A : Dict = use_shared_relative_position_bias A : Optional[Any] = layer_scale_init_value A : str = drop_path_rate A : Any = use_mean_pooling # decode head attributes (semantic segmentation) A : List[str] = out_indices A : Dict = pool_scales # auxiliary head attributes (semantic segmentation) A : Dict = use_auxiliary_head A : Any = auxiliary_loss_weight A : Optional[int] = auxiliary_channels A : int = auxiliary_num_convs A : str = auxiliary_concat_input A : int = semantic_loss_ignore_index class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = version.parse("1.11" ) @property def SCREAMING_SNAKE_CASE__ ( self : str ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def SCREAMING_SNAKE_CASE__ ( self : Any ) -> float: return 1e-4
713
from sympy import diff, lambdify, symbols from sympy.functions import * # noqa: F403 def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = "x" , _lowerCamelCase = 10**-10 , _lowerCamelCase = 1 , ): A : str = symbols(_lowerCamelCase ) A : int = lambdify(_lowerCamelCase , _lowerCamelCase ) A : List[str] = lambdify(_lowerCamelCase , diff(_lowerCamelCase , _lowerCamelCase ) ) A : Optional[int] = starting_point while True: if diff_function(_lowerCamelCase ) != 0: A : Optional[Any] = prev_guess - multiplicity * func(_lowerCamelCase ) / diff_function( _lowerCamelCase ) else: raise ZeroDivisionError("Could not find root" ) from None # Precision is checked by comparing the difference of consecutive guesses if abs(next_guess - prev_guess ) < precision: return next_guess A : int = next_guess # Let's execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""") # Find root of polynomial # Find the fourth root of 5 print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""") # Find value of e print( """The root of log(y) - 1 = 0 is """, F"""{newton_raphson('log(y) - 1', 2, variable='y')}""", ) # Exponential roots print( """The root of exp(x) - 1 = 0 is""", F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""", ) # Find root of cos(x) print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
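# Hedged note on the update rule implemented above: it is the modified Newton
# iteration x_{n+1} = x_n - multiplicity * f(x_n) / f'(x_n), which reduces to the
# classic method for the default multiplicity=1. One more worked example, using
# the upstream function name newton_raphson as the __main__ block does:
print(F"""The square root of 2 is {newton_raphson('x**2 - 2', 1.5)}""")  # ~1.4142135623730951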
17
0
from __future__ import annotations def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None ): if start is None: A : Union[str, Any] = 0 if end is None: A : Optional[int] = len(_lowerCamelCase ) - 1 if start >= end: return A : str = (start + end) // 2 slowsort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) slowsort(_lowerCamelCase , mid + 1 , _lowerCamelCase ) if sequence[end] < sequence[mid]: A , A : Optional[int] = sequence[mid], sequence[end] slowsort(_lowerCamelCase , _lowerCamelCase , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
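# Hedged usage sketch (upstream function name slowsort, as used in the recursion
# above): the sort works in place, and its "multiply and surrender" recursion is
# deliberately inefficient, so keep inputs tiny.
data = [5, 3, 1, 4, 2]
slowsort(data)
print(data)  # [1, 2, 3, 4, 5]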
714
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} __SCREAMING_SNAKE_CASE = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } __SCREAMING_SNAKE_CASE = { """allenai/led-base-16384""": 16384, } class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = LEDTokenizer a__ = ["input_ids", "attention_mask"] def __init__( self : int , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : str="replace" , __lowerCamelCase : Optional[int]="<s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Dict="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : Union[str, Any]="<pad>" , __lowerCamelCase : Dict="<mask>" , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : str=True , **__lowerCamelCase : Union[str, Any] , ) -> Optional[int]: super().__init__( __lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase , **__lowerCamelCase , ) A : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space: A : Any = getattr(__lowerCamelCase , pre_tok_state.pop("type" ) ) A : Any = add_prefix_space A : Tuple = pre_tok_class(**__lowerCamelCase ) A : str = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` A : List[str] = "post_processor" A : Union[str, Any] = getattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) if tokenizer_component_instance: A : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A : Union[str, Any] = tuple(state["sep"] ) if "cls" in state: A : str = tuple(state["cls"] ) A : int = False if state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space: A : List[Any] = add_prefix_space A : Dict = True if state.get("trim_offsets" , __lowerCamelCase ) != trim_offsets: A : Dict = trim_offsets A : str = True if changes_to_apply: A : int = getattr(__lowerCamelCase , state.pop("type" ) ) A : Dict = component_class(**__lowerCamelCase ) setattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) @property # Copied from 
transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Any ) -> Dict: A : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else value A : Tuple = value def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[str] ) -> BatchEncoding: A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , *__lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ) -> BatchEncoding: A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]: A : Optional[Any] = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase ) return tuple(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any]=None ) -> List[str]: A : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: A : str = [self.sep_token_id] A : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ) -> dict: A : Dict = super()._pad( encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: A : List[Any] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: A : Optional[int] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
A : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase ) if needs_to_be_padded: A : Any = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` A : Tuple = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": A : Tuple = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
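# Hedged sketch of the behaviour implemented in _pad above, using the upstream
# names (LEDTokenizerFast and the allenai/led-base-16384 checkpoint referenced
# in this file): when a batch carries a global_attention_mask, the shorter
# example is padded with -1, which LED reads as "local attention" rather than
# "do not attend".
from transformers import LEDTokenizerFast

tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
enc = tokenizer(["short input", "a noticeably longer second input sentence"])
enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]
batch = tokenizer.pad(enc, padding=True)
print(batch["global_attention_mask"][0])  # trailing positions are filled with -1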
17
0
'''simple docstring''' from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = "EncodecFeatureExtractor" a__ = ("T5Tokenizer", "T5TokenizerFast") def __init__( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] ) -> Optional[int]: super().__init__(__lowerCamelCase , __lowerCamelCase ) A : int = self.feature_extractor A : List[Any] = False def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : Dict=True ) -> Union[str, Any]: return self.tokenizer.get_decoder_prompt_ids(task=__lowerCamelCase , language=__lowerCamelCase , no_timestamps=__lowerCamelCase ) def __call__( self : Dict , *__lowerCamelCase : Tuple , **__lowerCamelCase : Any ) -> Optional[Any]: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*__lowerCamelCase , **__lowerCamelCase ) A : Any = kwargs.pop("audio" , __lowerCamelCase ) A : int = kwargs.pop("sampling_rate" , __lowerCamelCase ) A : Any = kwargs.pop("text" , __lowerCamelCase ) if len(__lowerCamelCase ) > 0: A : Optional[Any] = args[0] A : Dict = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." ) if text is not None: A : Union[str, Any] = self.tokenizer(__lowerCamelCase , **__lowerCamelCase ) if audio is not None: A : Dict = self.feature_extractor(__lowerCamelCase , *__lowerCamelCase , sampling_rate=__lowerCamelCase , **__lowerCamelCase ) if audio is None: return inputs elif text is None: return audio_inputs else: A : int = audio_inputs["input_values"] if "padding_mask" in audio_inputs: A : Dict = audio_inputs["padding_mask"] return inputs def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , *__lowerCamelCase : int , **__lowerCamelCase : Union[str, Any] ) -> Optional[Any]: A : Optional[Any] = kwargs.pop("audio" , __lowerCamelCase ) A : Tuple = kwargs.pop("padding_mask" , __lowerCamelCase ) if len(__lowerCamelCase ) > 0: A : Any = args[0] A : Tuple = args[1:] if audio_values is not None: return self._decode_audio(__lowerCamelCase , padding_mask=__lowerCamelCase ) else: return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , *__lowerCamelCase : Tuple , **__lowerCamelCase : Optional[Any] ) -> Optional[int]: return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional = None ) -> List[np.ndarray]: A : List[str] = to_numpy(__lowerCamelCase ) A : List[str] = audio_values.shape if padding_mask is None: return list(__lowerCamelCase ) A : List[Any] = to_numpy(__lowerCamelCase ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) A : Optional[int] = seq_len - padding_mask.shape[-1] A : Union[str, Any] = 1 - self.feature_extractor.padding_value A : Any = np.pad(__lowerCamelCase , ((0, 0), (0, difference)) , "constant" , constant_values=__lowerCamelCase ) A : List[Any] = audio_values.tolist() for i in range(__lowerCamelCase ): A : Tuple = np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] A : Union[str, Any] = sliced_audio.reshape(__lowerCamelCase , -1 ) 
return audio_values
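# Hedged standalone sketch of the trimming step in _decode_audio above: the
# padding mask (1 = real sample, padding_value = padded; padding_value == 0 is
# assumed here) selects only the real samples from each decoded waveform.
import numpy as np

audio_values = np.array([[0.1, 0.2, 0.0], [0.3, 0.0, 0.0]])
padding_mask = np.array([[1, 1, 0], [1, 0, 0]])
trimmed = [a[m != 0] for a, m in zip(audio_values, padding_mask)]
print([t.tolist() for t in trimmed])  # [[0.1, 0.2], [0.3]]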
715
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Sequence, Value from .base import TaskTemplate @dataclass(frozen=_A ) class lowerCamelCase_ ( _A ): '''simple docstring''' # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization a__ = field(default="question-answering-extractive" ,metadata={"include_in_asdict_even_if_is_default": True} ) a__ = Features({"question": Value("string" ), "context": Value("string" )} ) a__ = Features( { "answers": Sequence( { "text": Value("string" ), "answer_start": Value("int32" ), } ) } ) a__ = "question" a__ = "context" a__ = "answers" @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict[str, str]: return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
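# Hedged sketch of how a template like the one above is consumed: the
# column_mapping drives the renaming done by Dataset.prepare_for_task in older
# `datasets` releases (the task-template API was later deprecated), so treat
# this as illustrative only.
from datasets import load_dataset

squad = load_dataset("squad", split="train[:10]")
squad = squad.prepare_for_task("question-answering-extractive")
print(squad.column_names)  # ['question', 'context', 'answers']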
17
0
from __future__ import annotations import requests __SCREAMING_SNAKE_CASE = set( """approved_at_utc approved_by author_flair_background_color author_flair_css_class author_flair_richtext author_flair_template_id author_fullname author_premium can_mod_post category clicked content_categories created_utc downs edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta is_original_content is_reddit_media_domain is_video link_flair_css_class link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title name permalink pwls quarantine saved score secure_media secure_media_embed selftext subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type total_awards_received ups upvote_ratio url user_reports""".split() ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase = 1 , _lowerCamelCase = "new" , _lowerCamelCase = None ): A : int = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(_lowerCamelCase ) - valid_terms ) ): A : Optional[Any] = f"""Invalid search term: {invalid_search_terms}""" raise ValueError(_lowerCamelCase ) A : List[str] = requests.get( f"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={"User-agent": "A random string"} , ) if response.status_code == 429: raise requests.HTTPError A : Any = response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(_lowerCamelCase )} A : Tuple = {} for id_ in range(_lowerCamelCase ): A : Union[str, Any] = { item: data["data"]["children"][id_]["data"][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited. Try after some time print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
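# Hedged sketch of the structure returned when wanted_data is supplied (upstream
# function name get_subreddit_data, as used in __main__ above): a dict keyed
# 0..limit-1 whose values map each requested field to the post's data. Needs
# network access and is subject to the HTTP 429 rate limit handled above.
posts = get_subreddit_data("learnpython", limit=2, wanted_data=["title", "url"])
for idx, post in posts.items():
    print(idx, post["title"], post["url"])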
716
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class lowerCamelCase_ : '''simple docstring''' def __init__( self : int , __lowerCamelCase : Any , __lowerCamelCase : Dict=3 , __lowerCamelCase : Dict=32 , __lowerCamelCase : Any=3 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : str=[8, 16, 32, 64] , __lowerCamelCase : Dict=[1, 1, 2, 1] , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : str=3 , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase : Tuple=[2, 3, 4] , __lowerCamelCase : Any=1 , ) -> int: A : Optional[int] = parent A : List[str] = batch_size A : Tuple = image_size A : List[str] = num_channels A : List[str] = embeddings_size A : List[str] = hidden_sizes A : str = depths A : Optional[Any] = is_training A : int = use_labels A : Optional[int] = hidden_act A : List[Any] = num_labels A : List[str] = scope A : str = len(__lowerCamelCase ) A : Optional[int] = out_features A : str = out_indices A : Optional[int] = num_groups def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: A : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Optional[int] = None if self.use_labels: A : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels ) A : Tuple = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ) -> Optional[int]: A : Any = BitModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : List[Any] = model(__lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : Dict ) -> Tuple: A : Union[str, Any] = self.num_labels A : List[str] = BitForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : str = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ) -> 
List[Any]: A : Dict = BitBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Optional[Any] = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None A : Optional[Any] = None A : Optional[int] = BitBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Any = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Dict: A : List[str] = self.prepare_config_and_inputs() A , A , A : Tuple = config_and_inputs A : Tuple = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () a__ = ( {"feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: A : Any = BitModelTester(self ) A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: return @unittest.skip(reason="Bit does not output attentions" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: pass @unittest.skip(reason="Bit does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]: pass @unittest.skip(reason="Bit does not support input and output embeddings" ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]: pass def SCREAMING_SNAKE_CASE__ ( self : int ) -> str: A , A : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Dict = model_class(__lowerCamelCase ) A : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Optional[Any] = [*signature.parameters.keys()] A : List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]: A : int = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]: A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Optional[int] = model_class(config=__lowerCamelCase ) for name, module in model.named_modules(): if isinstance(__lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: def check_hidden_states_output(__lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] ): A : Dict = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): A : List[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) A : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A : List[Any] = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() A : Dict = ["preactivation", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: A : Dict = layer_type A : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) @unittest.skip(reason="Bit does not use feedforward chunking" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: pass def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]: A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]: for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Optional[Any] = BitModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]: return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: A : Union[str, Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCamelCase ) A : List[Any] = self.default_image_processor A : List[Any] = prepare_img() A : Tuple = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): A : Union[str, Any] = model(**__lowerCamelCase ) # verify the logits A : str = torch.Size((1, 
10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : Optional[Any] = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) @require_torch class lowerCamelCase_ ( _A ,unittest.TestCase ): '''simple docstring''' a__ = (BitBackbone,) if is_torch_available() else () a__ = BitConfig a__ = False def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: A : Union[str, Any] = BitModelTester(self )
17
0
'''simple docstring''' import warnings from functools import wraps from typing import Callable def UpperCAmelCase ( _lowerCamelCase ): @wraps(_lowerCamelCase ) def _inner_fn(*_lowerCamelCase , **_lowerCamelCase ): warnings.warn( (f"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , _lowerCamelCase , ) return fn(*_lowerCamelCase , **_lowerCamelCase ) return _inner_fn
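# Self-contained demo of the decorator above with readable names (the warning
# category is assumed to be UserWarning; the mangled source hides it):
import warnings
from functools import wraps

def experimental(fn):
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)
    return _inner_fn

@experimental
def add(a, b):
    return a + b

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    assert add(1, 2) == 3
assert "experimental" in str(caught[0].message)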
717
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: A : Union[str, Any] = XLMRobertaModel.from_pretrained("xlm-roberta-base" ) A : Tuple = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] ) # The dog is cute and lives in the garden house A : Tuple = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim A : List[str] = torch.tensor( [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): A : Tuple = model(__lowerCamelCase )["last_hidden_state"].detach() self.assertEqual(output.shape , __lowerCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCamelCase , atol=1e-3 ) ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int: A : str = XLMRobertaModel.from_pretrained("xlm-roberta-large" ) A : List[Any] = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] ) # The dog is cute and lives in the garden house A : Optional[Any] = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim A : List[Any] = torch.tensor( [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): A : Optional[int] = model(__lowerCamelCase )["last_hidden_state"].detach() self.assertEqual(output.shape , __lowerCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCamelCase , atol=1e-3 ) )
17
0
from __future__ import annotations from random import choice def UpperCAmelCase ( _lowerCamelCase ): return choice(_lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : List[str] = random_pivot(_lowerCamelCase ) # partition based on pivot # linear time A : Union[str, Any] = [e for e in lst if e < pivot] A : int = [e for e in lst if e > pivot] # if we get lucky, pivot might be the element we want. # we can easily see this: # small (elements smaller than k) # + pivot (kth element) # + big (elements larger than k) if len(_lowerCamelCase ) == k - 1: return pivot # pivot is in elements bigger than k elif len(_lowerCamelCase ) < k - 1: return kth_number(_lowerCamelCase , k - len(_lowerCamelCase ) - 1 ) # pivot is in elements smaller than k else: return kth_number(_lowerCamelCase , _lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
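# Readable restatement of the quickselect above (the mangled file shadows its own
# helpers, so this re-states the logic; it assumes distinct elements and a
# 1-indexed k, matching the partition arithmetic in the original):
from random import choice

def kth_number(lst, k):
    pivot = choice(lst)
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    if len(small) == k - 1:
        return pivot
    if len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)  # pivot and small are all below rank k
    return kth_number(small, k)

assert kth_number([2, 1, 3, 4, 5], 3) == 3  # median of five distinct values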
718
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase_ : '''simple docstring''' def __init__( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Optional[int]=10 , __lowerCamelCase : Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase : Tuple=[1, 1, 2, 1] , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , ) -> str: A : Optional[Any] = parent A : Optional[int] = batch_size A : List[str] = image_size A : List[str] = num_channels A : Tuple = embeddings_size A : Optional[int] = hidden_sizes A : Dict = depths A : Optional[int] = is_training A : List[str] = use_labels A : List[Any] = hidden_act A : Optional[int] = num_labels A : int = scope A : List[Any] = len(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[Any]: A : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Optional[Any] = None if self.use_labels: A : Any = ids_tensor([self.batch_size] , self.num_labels ) A : List[Any] = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Tuple: A : List[str] = TFRegNetModel(config=__lowerCamelCase ) A : str = model(__lowerCamelCase , training=__lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Tuple ) -> List[str]: A : List[Any] = self.num_labels A : int = TFRegNetForImageClassification(__lowerCamelCase ) A : str = model(__lowerCamelCase , labels=__lowerCamelCase , training=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: A : Any = self.prepare_config_and_inputs() A , A , A : str = config_and_inputs A : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () a__ = ( {"feature-extraction": TFRegNetModel, 
"image-classification": TFRegNetForImageClassification} if is_tf_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: A : Optional[Any] = TFRegNetModelTester(self ) A : int = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Tuple: return @unittest.skip(reason="RegNet does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) @slow def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: super().test_keras_fit() @unittest.skip(reason="RegNet does not support input and output embeddings" ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Union[str, Any]: pass def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple: A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Union[str, Any] = model_class(__lowerCamelCase ) A : int = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Union[str, Any] = [*signature.parameters.keys()] A : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple: A : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: def check_hidden_states_output(__lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ): A : int = model_class(__lowerCamelCase ) A : int = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) , training=__lowerCamelCase ) A : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A : Dict = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) A , A : int = self.model_tester.prepare_config_and_inputs_for_common() A : str = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: A : List[str] = layer_type A : List[Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: A , A : Tuple = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(__lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]={} ): A : Optional[int] = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ) A : int = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ).to_tuple() def recursive_check(__lowerCamelCase : List[str] , __lowerCamelCase : Any ): if isinstance(__lowerCamelCase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in 
zip(__lowerCamelCase , __lowerCamelCase ): recursive_check(__lowerCamelCase , __lowerCamelCase ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(__lowerCamelCase , __lowerCamelCase ) ) , msg=( "Tuple and dict output are not equal. Difference:" F""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ) , ) recursive_check(__lowerCamelCase , __lowerCamelCase ) for model_class in self.all_model_classes: A : Tuple = model_class(__lowerCamelCase ) A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} ) A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: A : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Union[str, Any] = TFRegNetModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]: A : List[Any] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) A : Optional[int] = self.default_image_processor A : List[Any] = prepare_img() A : str = image_processor(images=__lowerCamelCase , return_tensors="tf" ) # forward pass A : List[Any] = model(**__lowerCamelCase , training=__lowerCamelCase ) # verify the logits A : Dict = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : Optional[Any] = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 )
17
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __SCREAMING_SNAKE_CASE = { """configuration_altclip""": [ """ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AltCLIPConfig""", """AltCLIPTextConfig""", """AltCLIPVisionConfig""", ], """processing_altclip""": ["""AltCLIPProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """AltCLIPPreTrainedModel""", """AltCLIPModel""", """AltCLIPTextModel""", """AltCLIPVisionModel""", ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys __SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
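# Minimal illustration of the lazy-import mechanism used above (this is not the
# real transformers._LazyModule, just the core idea): attribute access imports
# the backing module on demand, so heavy dependencies load only when used.
import importlib
import types

class DemoLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> module that defines it, mirroring _import_structure
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)

lazy_math = DemoLazyModule("lazy_math", {"math": ["sqrt", "pi"]})
assert lazy_math.sqrt(9) == 3.0  # `math` is imported here, on first access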
719
import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = (PNDMScheduler,) a__ = (("num_inference_steps", 50),) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , **__lowerCamelCase : str ) -> Optional[Any]: A : Union[str, Any] = { "num_train_timesteps": 10_00, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**__lowerCamelCase ) return config def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : List[str]=0 , **__lowerCamelCase : Any ) -> Tuple: A : Dict = dict(self.forward_default_kwargs ) A : Dict = kwargs.pop("num_inference_steps" , __lowerCamelCase ) A : Union[str, Any] = self.dummy_sample A : List[Any] = 0.1 * sample A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A : Any = self.get_scheduler_config(**__lowerCamelCase ) A : int = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals A : Tuple = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCamelCase ) A : Dict = scheduler_class.from_pretrained(__lowerCamelCase ) new_scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals A : Tuple = dummy_past_residuals[:] A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : str = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A : int = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[str] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]: pass def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Optional[Any]=0 , **__lowerCamelCase : Tuple ) -> str: A : List[str] = dict(self.forward_default_kwargs ) A : Optional[int] = kwargs.pop("num_inference_steps" , __lowerCamelCase ) A : List[str] = self.dummy_sample A : Any = 0.1 * sample A : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A : Tuple = self.get_scheduler_config() A : Optional[int] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals (must be after setting timesteps) A : Optional[int] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCamelCase ) A : str = scheduler_class.from_pretrained(__lowerCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residual (must be after setting timesteps) A : Optional[Any] = dummy_past_residuals[:] A : Union[str, Any] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : Dict = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A : 
Union[str, Any] = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[Any] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : Tuple , **__lowerCamelCase : Any ) -> Union[str, Any]: A : Optional[Any] = self.scheduler_classes[0] A : List[Any] = self.get_scheduler_config(**__lowerCamelCase ) A : str = scheduler_class(**__lowerCamelCase ) A : List[str] = 10 A : Union[str, Any] = self.dummy_model() A : int = self.dummy_sample_deter scheduler.set_timesteps(__lowerCamelCase ) for i, t in enumerate(scheduler.prk_timesteps ): A : Optional[int] = model(__lowerCamelCase , __lowerCamelCase ) A : Optional[int] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): A : Tuple = model(__lowerCamelCase , __lowerCamelCase ) A : Tuple = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample return sample def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any: A : Union[str, Any] = dict(self.forward_default_kwargs ) A : Union[str, Any] = kwargs.pop("num_inference_steps" , __lowerCamelCase ) for scheduler_class in self.scheduler_classes: A : Dict = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) A : List[Any] = self.dummy_sample A : List[Any] = 0.1 * sample if num_inference_steps is not None and hasattr(__lowerCamelCase , "set_timesteps" ): scheduler.set_timesteps(__lowerCamelCase ) elif num_inference_steps is not None and not hasattr(__lowerCamelCase , "set_timesteps" ): A : List[str] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] A : Tuple = dummy_past_residuals[:] A : Dict = scheduler.step_prk(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[Any] = scheduler.step_prk(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) A : Any = scheduler.step_plms(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : str = scheduler.step_plms(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple: for timesteps in [1_00, 10_00]: self.check_over_configs(num_train_timesteps=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]: for steps_offset in [0, 1]: self.check_over_configs(steps_offset=__lowerCamelCase ) A : Dict = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config(steps_offset=1 ) A : Optional[int] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=__lowerCamelCase , beta_end=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : 
Optional[int] ) -> Union[str, Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]: for t in [1, 5, 10]: self.check_over_forward(time_step=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int: for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ): self.check_over_forward(num_inference_steps=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any: # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 A : str = 27 for scheduler_class in self.scheduler_classes: A : Tuple = self.dummy_sample A : List[Any] = 0.1 * sample A : List[Any] = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : str ) -> int: with self.assertRaises(__lowerCamelCase ): A : Union[str, Any] = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict: A : Optional[Any] = self.full_loop() A : Tuple = torch.sum(torch.abs(__lowerCamelCase ) ) A : Optional[int] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 198.1318 ) < 1e-2 assert abs(result_mean.item() - 0.2580 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any: A : Any = self.full_loop(prediction_type="v_prediction" ) A : Union[str, Any] = torch.sum(torch.abs(__lowerCamelCase ) ) A : List[str] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 67.3986 ) < 1e-2 assert abs(result_mean.item() - 0.0878 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]: # We specify different beta, so that the first alpha is 0.99 A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 ) A : Dict = torch.sum(torch.abs(__lowerCamelCase ) ) A : Any = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 230.0399 ) < 1e-2 assert abs(result_mean.item() - 0.2995 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any: # We specify different beta, so that the first alpha is 0.99 A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 ) A : List[Any] = torch.sum(torch.abs(__lowerCamelCase ) ) A : Optional[Any] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 186.9482 ) < 1e-2 assert abs(result_mean.item() - 0.2434 ) < 1e-3
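# Minimal denoising-loop sketch mirroring the PRK/PLMS stepping the tests above
# exercise (the zero "residual" stands in for a real model call; the config
# values come from get_scheduler_config above):
import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(
    num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.prk_timesteps:
    residual = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step_prk(residual, t, sample).prev_sample
for t in scheduler.plms_timesteps:
    residual = torch.zeros_like(sample)
    sample = scheduler.step_plms(residual, t, sample).prev_sample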
17
0
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 __SCREAMING_SNAKE_CASE = data_utils.TransfoXLTokenizer __SCREAMING_SNAKE_CASE = data_utils.TransfoXLCorpus __SCREAMING_SNAKE_CASE = data_utils __SCREAMING_SNAKE_CASE = data_utils def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(_lowerCamelCase , "rb" ) as fp: A : Dict = pickle.load(_lowerCamelCase , encoding="latin1" ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) A : int = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"] print(f"""Save vocabulary to {pytorch_vocab_dump_path}""" ) A : Dict = corpus.vocab.__dict__ torch.save(_lowerCamelCase , _lowerCamelCase ) A : Optional[int] = corpus.__dict__ corpus_dict_no_vocab.pop("vocab" , _lowerCamelCase ) A : Optional[int] = pytorch_dump_folder_path + "/" + CORPUS_NAME print(f"""Save dataset to {pytorch_dataset_dump_path}""" ) torch.save(_lowerCamelCase , _lowerCamelCase ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model A : str = os.path.abspath(_lowerCamelCase ) A : Optional[Any] = os.path.abspath(_lowerCamelCase ) print(f"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" ) # Initialise PyTorch model if transfo_xl_config_file == "": A : int = TransfoXLConfig() else: A : Dict = TransfoXLConfig.from_json_file(_lowerCamelCase ) print(f"""Building PyTorch model from configuration: {config}""" ) A : int = TransfoXLLMHeadModel(_lowerCamelCase ) A : str = load_tf_weights_in_transfo_xl(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Save pytorch-model A : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase ) A : Tuple = os.path.join(_lowerCamelCase , _lowerCamelCase ) print(f"""Save PyTorch model to {os.path.abspath(_lowerCamelCase )}""" ) torch.save(model.state_dict() , _lowerCamelCase ) print(f"""Save configuration file to {os.path.abspath(_lowerCamelCase )}""" ) with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the folder to store the PyTorch model or dataset/vocab.""", ) parser.add_argument( """--tf_checkpoint_path""", default="""""", type=str, help="""An optional path to a TensorFlow checkpoint path to be converted.""", ) parser.add_argument( """--transfo_xl_config_file""", default="""""", type=str, help=( """An optional config json file corresponding to the pre-trained BERT model. 
\n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--transfo_xl_dataset_file""", default="""""", type=str, help="""An optional dataset file to be converted in a vocabulary.""", ) __SCREAMING_SNAKE_CASE = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
720
from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function __SCREAMING_SNAKE_CASE = 1.0_5_4_5_7_1_8_1_7e-3_4 # unit of ℏ : J * s __SCREAMING_SNAKE_CASE = 3e8 # unit of c : m * s^-1 def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if (force, area, distance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if force < 0: raise ValueError("Magnitude of force can not be negative" ) if distance < 0: raise ValueError("Distance can not be negative" ) if area < 0: raise ValueError("Area can not be negative" ) if force == 0: A : int = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 240 * (distance) ** 4 ) return {"force": force} elif area == 0: A : Tuple = (240 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: A : Dict = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError("One and only one argument must be 0" ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
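# Worked example for the Casimir helper above, solving for force with the other
# two quantities fixed (the area and distance values are illustrative):
from math import pi

REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # J * s
SPEED_OF_LIGHT = 3e8  # m * s^-1

area, distance = 4.0, 0.03  # m^2, m
force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * distance**4)
assert abs(force - 6.4e-21) / 6.4e-21 < 0.01  # ~6.4e-21 N, matching the force == 0 branch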
17
0
from __future__ import annotations from collections.abc import Sequence from typing import Literal def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : Dict = list(_lowerCamelCase ) A : Optional[Any] = list(_lowerCamelCase ) A : Optional[Any] = 0 for i in range(len(_lowerCamelCase ) ): if lista[i] != lista[i]: count += 1 A : Optional[int] = "_" if count > 1: return False else: return "".join(_lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase ): A : List[str] = [] while True: A : int = ["$"] * len(_lowerCamelCase ) A : List[str] = [] for i in range(len(_lowerCamelCase ) ): for j in range(i + 1 , len(_lowerCamelCase ) ): A : List[str] = compare_string(binary[i] , binary[j] ) if k is False: A : List[Any] = "*" A : int = "*" temp.append("X" ) for i in range(len(_lowerCamelCase ) ): if checka[i] == "$": pi.append(binary[i] ) if len(_lowerCamelCase ) == 0: return pi A : List[Any] = list(set(_lowerCamelCase ) ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : List[str] = [] for minterm in minterms: A : Optional[Any] = "" for _ in range(_lowerCamelCase ): A : int = str(minterm % 2 ) + string minterm //= 2 temp.append(_lowerCamelCase ) return temp def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): A : int = list(_lowerCamelCase ) A : Tuple = list(_lowerCamelCase ) A : str = 0 for i in range(len(_lowerCamelCase ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : List[Any] = [] A : str = [0] * len(_lowerCamelCase ) for i in range(len(chart[0] ) ): A : int = 0 A : Optional[int] = -1 for j in range(len(_lowerCamelCase ) ): if chart[j][i] == 1: count += 1 A : Optional[Any] = j if count == 1: A : int = 1 for i in range(len(_lowerCamelCase ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(_lowerCamelCase ) ): A : Optional[int] = 0 temp.append(prime_implicants[i] ) while True: A : Any = 0 A : Optional[Any] = -1 A : Tuple = 0 for i in range(len(_lowerCamelCase ) ): A : Dict = chart[i].count(1 ) if count_n > max_n: A : int = count_n A : Dict = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(_lowerCamelCase ) ): A : List[str] = 0 def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : List[str] = [[0 for x in range(len(_lowerCamelCase ) )] for x in range(len(_lowerCamelCase ) )] for i in range(len(_lowerCamelCase ) ): A : int = prime_implicants[i].count("_" ) for j in range(len(_lowerCamelCase ) ): if is_for_table(prime_implicants[i] , binary[j] , _lowerCamelCase ): A : Optional[Any] = 1 return chart def UpperCAmelCase ( ): A : Optional[Any] = int(input("Enter the no. of variables\n" ) ) A : int = [ float(_lowerCamelCase ) for x in input( "Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split() ] A : Optional[int] = decimal_to_binary(_lowerCamelCase , _lowerCamelCase ) A : int = check(_lowerCamelCase ) print("Prime Implicants are:" ) print(_lowerCamelCase ) A : Optional[int] = prime_implicant_chart(_lowerCamelCase , _lowerCamelCase ) A : Optional[int] = selection(_lowerCamelCase , _lowerCamelCase ) print("Essential Prime Implicants are:" ) print(_lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() main()
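# Readable restatement of the merge test at the heart of the Quine-McCluskey code
# above: two minterms combine iff they differ in exactly one bit, and that bit
# becomes a don't-care "_".
def compare_string(a, b):
    chars = list(a)
    count = 0
    for i, (x, y) in enumerate(zip(a, b)):
        if x != y:
            count += 1
            chars[i] = "_"
    return False if count > 1 else "".join(chars)

assert compare_string("0010", "0110") == "0_10"  # merge on bit 1
assert compare_string("0010", "0101") is False   # differ in three bits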
721
import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : Optional[Any] = RobertaPreLayerNormConfig.from_pretrained( _lowerCamelCase , architectures=["RobertaPreLayerNormForMaskedLM"] ) # convert state_dict A : List[Any] = torch.load(hf_hub_download(repo_id=_lowerCamelCase , filename="pytorch_model.bin" ) ) A : Union[str, Any] = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta' if tensor_key.startswith("roberta." ): A : int = "roberta_prelayernorm." + tensor_key[len("roberta." ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ): continue A : Any = tensor_value A : Optional[int] = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=_lowerCamelCase , config=_lowerCamelCase , state_dict=_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) # convert tokenizer A : Optional[Any] = AutoTokenizer.from_pretrained(_lowerCamelCase ) tokenizer.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint-repo""", default=None, type=str, required=True, help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) __SCREAMING_SNAKE_CASE = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
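# Hedged CLI sketch for the conversion script above (script filename assumed; the
# flags and the example repo come from the argparse help text):
#   python convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm-dump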
17
0
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = "blenderbot-small" a__ = ["past_key_values"] a__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : int , __lowerCamelCase : Any=5_02_65 , __lowerCamelCase : List[str]=5_12 , __lowerCamelCase : Any=8 , __lowerCamelCase : Optional[Any]=20_48 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : Union[str, Any]=8 , __lowerCamelCase : Any=20_48 , __lowerCamelCase : Tuple=16 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : str=0.0 , __lowerCamelCase : Dict=True , __lowerCamelCase : Dict=True , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Optional[Any]=5_12 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : Tuple=0.02 , __lowerCamelCase : Any=1 , __lowerCamelCase : List[str]=False , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : Union[str, Any]=1 , __lowerCamelCase : int=2 , __lowerCamelCase : Optional[Any]=2 , **__lowerCamelCase : Dict , ) -> Tuple: A : Dict = vocab_size A : List[str] = max_position_embeddings A : List[str] = d_model A : Any = encoder_ffn_dim A : List[Any] = encoder_layers A : Dict = encoder_attention_heads A : Optional[int] = decoder_ffn_dim A : Any = decoder_layers A : Dict = decoder_attention_heads A : List[Any] = dropout A : Dict = attention_dropout A : str = activation_dropout A : List[Any] = activation_function A : Any = init_std A : Optional[Any] = encoder_layerdrop A : Dict = decoder_layerdrop A : Tuple = use_cache A : Union[str, Any] = encoder_layers A : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , is_encoder_decoder=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , forced_eos_token_id=__lowerCamelCase , **__lowerCamelCase , ) class lowerCamelCase_ ( _A ): '''simple docstring''' @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: A : Any = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: A : int = {0: "batch"} A : List[str] = {0: "batch", 1: "past_decoder_sequence + sequence"} else: A : Dict = {0: "batch", 1: "decoder_sequence"} A : Tuple = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. 
A : Dict = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: A : Union[str, Any] = self.num_layers for i in range(__lowerCamelCase ): A : Optional[int] = {0: "batch", 2: "past_sequence + sequence"} A : Any = {0: "batch", 2: "past_sequence + sequence"} else: A : Dict = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: A : Optional[int] = super().outputs else: A : Tuple = super(__lowerCamelCase , self ).outputs if self.use_past: A : Any = self.num_layers for i in range(__lowerCamelCase ): A : List[Any] = {0: "batch", 2: "past_sequence + sequence"} A : List[str] = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]: A : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Generate decoder inputs A : List[Any] = seq_length if not self.use_past else 1 A : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) A : List[str] = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} A : Optional[Any] = dict(**__lowerCamelCase , **__lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch A : Tuple = common_inputs["input_ids"].shape A : str = common_inputs["decoder_input_ids"].shape[1] A : Optional[Any] = self.num_attention_heads A : int = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) A : Optional[int] = decoder_seq_length + 3 A : Optional[int] = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) A : Dict = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase )] , dim=1 ) A : Union[str, Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered A : Union[str, Any] = self.num_layers A : str = min(__lowerCamelCase , __lowerCamelCase ) A : Optional[int] = max(__lowerCamelCase , __lowerCamelCase ) - min_num_layers A : Optional[int] = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(__lowerCamelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), ) ) # TODO: test this. 
A : Optional[Any] = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(__lowerCamelCase , __lowerCamelCase ): common_inputs["past_key_values"].append((torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) ) return common_inputs def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]: A : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch A : Optional[Any] = common_inputs["input_ids"].shape # Not using the same length for past_key_values A : Dict = seqlen + 2 A : Optional[int] = self.num_layers A : List[str] = self.num_attention_heads A : Tuple = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) A : Union[str, Any] = common_inputs["attention_mask"].dtype A : Tuple = torch.cat( [common_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 ) A : str = [ (torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(__lowerCamelCase ) ] return common_inputs def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX A : Optional[Any] = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX A : Any = tokenizer.num_special_tokens_to_add(__lowerCamelCase ) A : str = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCamelCase ) # Generate dummy inputs according to compute batch and sequence A : List[str] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size A : Any = dict(tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase ) ) return common_inputs def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: A : List[Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) elif self.task == "causal-lm": A : Union[str, Any] = self._generate_dummy_inputs_for_causal_lm( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) else: A : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) return common_inputs def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] ) -> Union[str, Any]: if self.task in ["default", "seq2seq-lm"]: A : Optional[Any] = super()._flatten_past_key_values_(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) else: A : Optional[Any] = super(__lowerCamelCase , self )._flatten_past_key_values_( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
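# Hedged sketch of driving the ONNX config above. The mangled dummy-input method
# corresponds to transformers' generate_dummy_inputs, whose signature is visible
# in the parameter list (tokenizer, batch_size, seq_length, is_pair, framework);
# the checkpoint name is taken from the pretrained map at the top of the file.
#   from transformers import AutoTokenizer
#   from transformers.utils import TensorType
#   tok = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#   dummy = onnx_config.generate_dummy_inputs(tok, framework=TensorType.PYTORCH)
#   # -> dict with input_ids / attention_mask / decoder_* tensors (and
#   #    past_key_values when use_past is enabled)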
700
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __SCREAMING_SNAKE_CASE = { """configuration_instructblip""": [ """INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """InstructBlipConfig""", """InstructBlipQFormerConfig""", """InstructBlipVisionConfig""", ], """processing_instructblip""": ["""InstructBlipProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """InstructBlipQFormerModel""", """InstructBlipPreTrainedModel""", """InstructBlipForConditionalGeneration""", """InstructBlipVisionModel""", ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys __SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
17
0
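For reference, the ONNX-export config code earlier in this record builds past_key_values as one (key, value) pair of zero tensors per layer, shaped (batch, num_heads, past_seq_len, hidden_size // num_heads). A minimal standalone sketch with illustrative sizes (assumes only that PyTorch is installed):

import torch

batch, num_heads, past_len, hidden_size, num_layers = 2, 4, 10, 64, 3
past_shape = (batch, num_heads, past_len, hidden_size // num_heads)
# one (key, value) pair of zeros per layer, mirroring the config code above
past_key_values = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_layers)]
assert past_key_values[0][0].shape == (2, 4, 10, 16)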
from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function __SCREAMING_SNAKE_CASE = 1.0_5_4_5_7_1_8_1_7e-3_4 # unit of ℏ : J * s __SCREAMING_SNAKE_CASE = 3e8 # unit of c : m * s^-1 def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if (force, area, distance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if force < 0: raise ValueError("Magnitude of force can not be negative" ) if distance < 0: raise ValueError("Distance can not be negative" ) if area < 0: raise ValueError("Area can not be negative" ) if force == 0: A : int = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 240 * (distance) ** 4 ) return {"force": force} elif area == 0: A : Tuple = (240 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: A : Dict = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError("One and only one argument must be 0" ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
701
import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Optional[int]: self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) ) for a, b in zip(__lowerCamelCase , __lowerCamelCase ): self.assertAlmostEqual(__lowerCamelCase , __lowerCamelCase , delta=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]: A : List[Any] = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(__lowerCamelCase ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: A : Union[str, Any] = None ops.enable_eager_execution_internal() A : Tuple = tf.config.list_physical_devices("CPU" ) if len(__lowerCamelCase ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) A : Dict = tf.config.list_logical_devices(device_type="CPU" ) A : List[str] = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): A : Optional[int] = GradientAccumulator() A : Tuple = tf.Variable([4.0, 3.0] ) A , A : List[Any] = create_optimizer(5e-5 , 10 , 5 ) A : List[str] = tf.Variable([0.0, 0.0] , trainable=__lowerCamelCase ) def accumulate_on_replica(__lowerCamelCase : Tuple ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(__lowerCamelCase : Any , __lowerCamelCase : Optional[int] ): with strategy.scope(): A : int = strategy.experimental_local_results(__lowerCamelCase ) local_variables[0].assign(__lowerCamelCase ) local_variables[1].assign(__lowerCamelCase ) strategy.run(__lowerCamelCase , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(__lowerCamelCase ) def _check_local_values(__lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ): A : Optional[int] = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , __lowerCamelCase , tol=1e-2 ) self.assertListAlmostEqual(values[1].value() , __lowerCamelCase , tol=1e-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
17
0
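A quick usage sketch for the Casimir helper in this record (hypothetical call, assuming the function keeps a conventional casimir_force(force, area, distance) signature; exactly one argument must be 0 and is the one solved for):

# force between two 4 m^2 plates held 5 cm apart; returns a dict like {"force": ...}
print(casimir_force(force=0, area=4, distance=0.05))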
import warnings from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401 warnings.warn( """The `inpainting.py` script is outdated. Please use directly `from diffusers import""" """ StableDiffusionInpaintPipeline` instead.""" )
702
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __SCREAMING_SNAKE_CASE = { """configuration_altclip""": [ """ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AltCLIPConfig""", """AltCLIPTextConfig""", """AltCLIPVisionConfig""", ], """processing_altclip""": ["""AltCLIPProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """AltCLIPPreTrainedModel""", """AltCLIPModel""", """AltCLIPTextModel""", """AltCLIPVisionModel""", ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys __SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
17
0
import itertools import string from collections.abc import Generator, Iterable def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : str = iter(_lowerCamelCase ) while True: A : Union[str, Any] = tuple(itertools.islice(_lowerCamelCase , _lowerCamelCase ) ) if not chunk: return yield chunk def UpperCAmelCase ( _lowerCamelCase ): A : str = "".join([c.upper() for c in dirty if c in string.ascii_letters] ) A : int = "" if len(_lowerCamelCase ) < 2: return dirty for i in range(len(_lowerCamelCase ) - 1 ): clean += dirty[i] if dirty[i] == dirty[i + 1]: clean += "X" clean += dirty[-1] if len(_lowerCamelCase ) & 1: clean += "X" return clean def UpperCAmelCase ( _lowerCamelCase ): # I and J are used interchangeably to allow # us to use a 5x5 table (25 letters) A : Optional[Any] = "ABCDEFGHIKLMNOPQRSTUVWXYZ" # we're using a list instead of a '2d' array because it makes the math # for setting up the table and doing the actual encoding/decoding simpler A : List[Any] = [] # copy key chars into the table if they are in `alphabet` ignoring duplicates for char in key.upper(): if char not in table and char in alphabet: table.append(_lowerCamelCase ) # fill the rest of the table in with the remaining alphabet chars for char in alphabet: if char not in table: table.append(_lowerCamelCase ) return table def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : Optional[Any] = generate_table(_lowerCamelCase ) A : str = prepare_input(_lowerCamelCase ) A : List[Any] = "" # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(_lowerCamelCase , 2 ): A : List[Any] = divmod(table.index(_lowerCamelCase ) , 5 ) A : List[str] = divmod(table.index(_lowerCamelCase ) , 5 ) if rowa == rowa: ciphertext += table[rowa * 5 + (cola + 1) % 5] ciphertext += table[rowa * 5 + (cola + 1) % 5] elif cola == cola: ciphertext += table[((rowa + 1) % 5) * 5 + cola] ciphertext += table[((rowa + 1) % 5) * 5 + cola] else: # rectangle ciphertext += table[rowa * 5 + cola] ciphertext += table[rowa * 5 + cola] return ciphertext def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : List[str] = generate_table(_lowerCamelCase ) A : Tuple = "" # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(_lowerCamelCase , 2 ): A : Tuple = divmod(table.index(_lowerCamelCase ) , 5 ) A : Optional[Any] = divmod(table.index(_lowerCamelCase ) , 5 ) if rowa == rowa: plaintext += table[rowa * 5 + (cola - 1) % 5] plaintext += table[rowa * 5 + (cola - 1) % 5] elif cola == cola: plaintext += table[((rowa - 1) % 5) * 5 + cola] plaintext += table[((rowa - 1) % 5) * 5 + cola] else: # rectangle plaintext += table[rowa * 5 + cola] plaintext += table[rowa * 5 + cola] return plaintext
703
import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging __SCREAMING_SNAKE_CASE = ( """https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py""" ) __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name def UpperCAmelCase ( ): A : Union[str, Any] = "https://pypi.org/pypi/diffusers/json" A : List[Any] = json.loads(request.urlopen(_lowerCamelCase ).read() )["releases"].keys() return sorted(_lowerCamelCase , key=lambda _lowerCamelCase : version.Version(_lowerCamelCase ) ) def UpperCAmelCase ( ): # This function has already been executed if HF_MODULES_CACHE already is in the Python path. if HF_MODULES_CACHE in sys.path: return sys.path.append(_lowerCamelCase ) os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) A : List[Any] = Path(_lowerCamelCase ) / "__init__.py" if not init_path.exists(): init_path.touch() def UpperCAmelCase ( _lowerCamelCase ): init_hf_modules() A : Tuple = Path(_lowerCamelCase ) / name # If the parent module does not exist yet, recursively create it. if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) A : Optional[int] = dynamic_module_path / "__init__.py" if not init_path.exists(): init_path.touch() def UpperCAmelCase ( _lowerCamelCase ): with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f: A : Union[str, Any] = f.read() # Imports of the form `import .xxx` A : Union[str, Any] = re.findall("^\s*import\s+\.(\S+)\s*$" , _lowerCamelCase , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import" , _lowerCamelCase , flags=re.MULTILINE ) # Unique-ify return list(set(_lowerCamelCase ) ) def UpperCAmelCase ( _lowerCamelCase ): A : Optional[int] = False A : Tuple = [module_file] A : Optional[int] = [] # Let's recurse through all relative imports while not no_change: A : Optional[Any] = [] for f in files_to_check: new_imports.extend(get_relative_imports(_lowerCamelCase ) ) A : Optional[Any] = Path(_lowerCamelCase ).parent A : List[str] = [str(module_path / m ) for m in new_imports] A : Optional[Any] = [f for f in new_import_files if f not in all_relative_imports] A : Union[str, Any] = [f"""{f}.py""" for f in new_import_files] A : Tuple = len(_lowerCamelCase ) == 0 all_relative_imports.extend(_lowerCamelCase ) return all_relative_imports def UpperCAmelCase ( _lowerCamelCase ): with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f: A : Dict = f.read() # Imports of the form `import xxx` A : List[str] = re.findall("^\s*import\s+(\S+)\s*$" , _lowerCamelCase , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall("^\s*from\s+(\S+)\s+import" , _lowerCamelCase , flags=re.MULTILINE ) # Only keep the top-level module A : Optional[int] = [imp.split("." )[0] for imp in imports if not imp.startswith("." 
)] # Unique-ify and test we got them all A : Any = list(set(_lowerCamelCase ) ) A : Tuple = [] for imp in imports: try: importlib.import_module(_lowerCamelCase ) except ImportError: missing_packages.append(_lowerCamelCase ) if len(_lowerCamelCase ) > 0: raise ImportError( "This modeling file requires the following packages that were not found in your environment: " f"""{", ".join(_lowerCamelCase )}. Run `pip install {" ".join(_lowerCamelCase )}`""" ) return get_relative_imports(_lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : int = module_path.replace(os.path.sep , "." ) A : Optional[Any] = importlib.import_module(_lowerCamelCase ) if class_name is None: return find_pipeline_class(_lowerCamelCase ) return getattr(_lowerCamelCase , _lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase ): from ..pipelines import DiffusionPipeline A : int = dict(inspect.getmembers(_lowerCamelCase , inspect.isclass ) ) A : Union[str, Any] = None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , _lowerCamelCase ) and cls.__module__.split("." )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:""" f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in""" f""" {loaded_module}.""" ) A : Any = cls return pipeline_class def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , ): A : List[Any] = str(_lowerCamelCase ) A : Any = os.path.join(_lowerCamelCase , _lowerCamelCase ) if os.path.isfile(_lowerCamelCase ): A : Union[str, Any] = module_file_or_url A : Any = "local" elif pretrained_model_name_or_path.count("/" ) == 0: A : Optional[Any] = get_diffusers_versions() # cut ".dev0" A : Union[str, Any] = "v" + ".".join(__version__.split("." )[:3] ) # retrieve github version that matches if revision is None: A : List[Any] = latest_version if latest_version[1:] in available_versions else "main" logger.info(f"""Defaulting to latest_version: {revision}.""" ) elif revision in available_versions: A : Optional[Any] = f"""v{revision}""" elif revision == "main": A : Dict = revision else: raise ValueError( f"""`custom_revision`: {revision} does not exist. 
Please make sure to choose one of""" f""" {", ".join(available_versions + ["main"] )}.""" ) # community pipeline on GitHub A : Dict = COMMUNITY_PIPELINES_URL.format(revision=_lowerCamelCase , pipeline=_lowerCamelCase ) try: A : Optional[int] = cached_download( _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , ) A : Optional[Any] = "git" A : Any = pretrained_model_name_or_path + ".py" except EnvironmentError: logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" ) raise else: try: # Load from URL or cache if already cached A : Any = hf_hub_download( _lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , ) A : Optional[Any] = os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) ) except EnvironmentError: logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" ) raise # Check we have all the requirements in our environment A : List[str] = check_imports(_lowerCamelCase ) # Now we move the module inside our cached dynamic modules. A : List[str] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(_lowerCamelCase ) A : Optional[int] = Path(_lowerCamelCase ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(_lowerCamelCase , submodule_path / module_file ) for module_needed in modules_needed: A : int = f"""{module_needed}.py""" shutil.copy(os.path.join(_lowerCamelCase , _lowerCamelCase ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(_lowerCamelCase , _lowerCamelCase ): A : Optional[Any] = use_auth_token elif use_auth_token is True: A : Dict = HfFolder.get_token() else: A : Tuple = None A : List[str] = model_info(_lowerCamelCase , revision=_lowerCamelCase , token=_lowerCamelCase ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. 
A : str = submodule_path / commit_hash A : List[str] = full_submodule + os.path.sep + commit_hash create_dynamic_module(_lowerCamelCase ) if not (submodule_path / module_file).exists(): shutil.copy(_lowerCamelCase , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( _lowerCamelCase , f"""{module_needed}.py""" , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , ) return os.path.join(_lowerCamelCase , _lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , **_lowerCamelCase , ): A : int = get_cached_module_file( _lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , ) return get_class_in_module(_lowerCamelCase , final_module.replace(".py" , "" ) )
17
0
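An illustrative round-trip for the Playfair helpers in this record, assuming the encoding and decoding functions keep distinct conventional names (encode/decode); the table merges I and J, and an odd-length cleaned input is padded with X:

ciphertext = encode("Hide the gold", "SECRET")            # hypothetical name for the encoding helper above
assert decode(ciphertext, "SECRET") == "HIDETHEGOLDX"     # decoding reverses the digram substitutions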
def UpperCAmelCase ( _lowerCamelCase ): if not all(x.isalpha() for x in string ): raise ValueError("String must only contain alphabetic characters." ) A : Any = sorted(string.lower() ) return len(_lowerCamelCase ) == len(set(_lowerCamelCase ) ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE = input("""Enter a string """).strip() __SCREAMING_SNAKE_CASE = is_isogram(input_str) print(F"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
704
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed __SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""): from run_translation import main # noqa set_seed(42) __SCREAMING_SNAKE_CASE = """sshleifer/student_marian_en_ro_6_1""" __SCREAMING_SNAKE_CASE = """sshleifer/tiny-mbart""" @require_torch class lowerCamelCase_ ( _A ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : str=None , __lowerCamelCase : List[str]=True , __lowerCamelCase : str=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Optional[int]=True , ) -> Dict: A : str = self.run_trainer( eval_steps=1 , max_len=12 , model_name=__lowerCamelCase , num_train_epochs=1 , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , predict_with_generate=__lowerCamelCase , do_train=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , ) A : Dict = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history if not do_eval: return A : List[Any] = [log for log in logs if "eval_loss" in log.keys()] A : Any = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats A : List[str] = eval_metrics[-1] assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase ) assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: self.run_seqaseq_quick() @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: self.run_seqaseq_quick(distributed=__lowerCamelCase ) @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str: self.run_seqaseq_quick(distributed=__lowerCamelCase ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]: self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple" ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> str: self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple --fp16" ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=__lowerCamelCase ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: self.run_seqaseq_quick( distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , 
predict_with_generate=__lowerCamelCase ) @require_apex @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict: # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" ) @parameterized.expand(["base", "low", "high", "mixed"] ) @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : List[str] ) -> Tuple: # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout A : Dict = { # test with the default log_level - should be info and thus log info once "base": {"extra_args_str": "", "n_matches": 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1}, # test with high log_level and log_level_replica - should be quiet on all processes "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0}, } A : List[str] = experiments[experiment_id] A : Union[str, Any] = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False} A : Union[str, Any] = "Running training" with CaptureStderr() as cl: self.run_seqaseq_quick(**__lowerCamelCase , extra_args_str=data["extra_args_str"] ) A : Dict = len(re.findall(__lowerCamelCase , cl.err ) ) self.assertEqual(__lowerCamelCase , data["n_matches"] ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: A : int = self.run_trainer( eval_steps=2 , max_len=1_28 , model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=10 , distributed=__lowerCamelCase , ) # Check metrics A : str = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history A : Dict = [log for log in logs if "eval_loss" in log.keys()] A : Dict = eval_metrics[0] A : int = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase ) # test if do_predict saves generations and metrics A : Optional[Any] = os.listdir(__lowerCamelCase ) A : Any = {os.path.basename(__lowerCamelCase ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]: from transformers.training_args import OptimizerNames def train_and_return_metrics(__lowerCamelCase : str ) -> Tuple[int, float]: A : Optional[int] = "--skip_memory_metrics 0" A : str = self.run_trainer( max_len=1_28 , 
model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=1 , optim=__lowerCamelCase , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , n_gpus_to_use=1 , ) # Check metrics A : Union[str, Any] = TrainerState.load_from_json(Path(__lowerCamelCase , "trainer_state.json" ) ).log_history A : str = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 ) A : List[Any] = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 ) A : int = logs[0]["train_loss"] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss A , A , A : int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) A , A , A : Optional[int] = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) A : Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb A : Dict = gpu_peak_mem_orig + gpu_alloc_mem_orig A : Dict = gpu_peak_mem_bnb + gpu_alloc_mem_bnb A : int = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings A : Tuple = 1_20 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( __lowerCamelCase , __lowerCamelCase , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got" F""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and""" F""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , ) self.assertGreater( __lowerCamelCase , __lowerCamelCase , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got" F""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and""" F""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , ) self.assertEqual( __lowerCamelCase , __lowerCamelCase , F"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" ) def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : float = 3e-3 , __lowerCamelCase : str = "adafactor" , __lowerCamelCase : bool = False , __lowerCamelCase : str = None , __lowerCamelCase : int = 0 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : int = None , ) -> List[str]: A : Optional[int] = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro" A : Optional[int] = self.get_auto_remove_tmp_dir() A : int = F""" --model_name_or_path {model_name} --train_file {data_dir}/train.json 
--validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(__lowerCamelCase )} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(__lowerCamelCase )} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX """.split() A : Optional[Any] = F""" --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(__lowerCamelCase )} """.split() A : Optional[Any] = "\n --do_predict\n ".split() A : Optional[int] = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F"""--optim {optim}""".split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: A : Dict = get_gpu_count() A : Any = get_torch_dist_unique_port() A : Optional[Any] = F""" -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py """.split() A : Any = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__lowerCamelCase , env=self.get_env() ) else: A : List[Any] = ["run_translation.py"] + args with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ): main() return output_dir
17
0
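Usage sketch for the isogram check in this record (using the is_isogram name from its __main__ block; non-alphabetic input raises ValueError):

assert is_isogram("Uncopyrightable")      # fifteen distinct letters
assert not is_isogram("letter")           # repeated letters t and e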
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): while a != 0: A , A : Dict = b % a, a return b def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): if gcd(_lowerCamelCase , _lowerCamelCase ) != 1: A : Tuple = f"""mod inverse of {a!r} and {m!r} does not exist""" raise ValueError(_lowerCamelCase ) A , A , A : Optional[Any] = 1, 0, a A , A , A : str = 0, 1, m while va != 0: A : Optional[Any] = ua // va A , A , A , A , A , A : Union[str, Any] = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va return ua % m
705
from collections.abc import Sequence def UpperCAmelCase ( _lowerCamelCase = None ): if nums is None or not nums: raise ValueError("Input sequence should not be empty" ) A : Dict = nums[0] for i in range(1 , len(_lowerCamelCase ) ): A : Tuple = nums[i] A : List[Any] = max(_lowerCamelCase , ans + num , _lowerCamelCase ) return ans if __name__ == "__main__": import doctest doctest.testmod() # Try on a sample input from the user __SCREAMING_SNAKE_CASE = int(input("""Enter number of elements : """).strip()) __SCREAMING_SNAKE_CASE = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n] print(max_subsequence_sum(array))
17
0
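Two standalone worked examples for this record. The extended-Euclid helper computes modular inverses, e.g. the inverse of 7 mod 26 is 15, since 7 * 15 = 105 = 4 * 26 + 1; the maximum-subsequence scan is Kadane's algorithm:

assert pow(7, -1, 26) == 15   # Python 3.8+ built-in modular inverse, matching the helper's result

def kadane(nums):
    best = cur = nums[0]
    for num in nums[1:]:
        cur = max(num, cur + num)   # extend the current run or restart at num
        best = max(best, cur)
    return best

assert kadane([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6   # best subarray is [4, -1, 2, 1]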
from __future__ import annotations def UpperCAmelCase ( _lowerCamelCase ): # preprocessing the first row for i in range(1 , len(matrix[0] ) ): matrix[0][i] += matrix[0][i - 1] # preprocessing the first column for i in range(1 , len(_lowerCamelCase ) ): matrix[i][0] += matrix[i - 1][0] # updating the path cost for current position for i in range(1 , len(_lowerCamelCase ) ): for j in range(1 , len(matrix[0] ) ): matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] ) return matrix[-1][-1] if __name__ == "__main__": import doctest doctest.testmod()
706
from math import sqrt def UpperCAmelCase ( _lowerCamelCase = 100_0000 ): A : int = 0 A : int = 0 A : int while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(_lowerCamelCase , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(F"""{solution() = }""")
17
0
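A worked example for the in-place grid walk in this record: each cell accumulates the cheaper of its top or left neighbour, so the minimum top-left to bottom-right path of [[1, 3, 1], [1, 5, 1], [4, 2, 1]] costs 7 (1 -> 3 -> 1 -> 1 -> 1):

grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
for i in range(1, len(grid[0])):
    grid[0][i] += grid[0][i - 1]            # prefix sums along the first row
for i in range(1, len(grid)):
    grid[i][0] += grid[i - 1][0]            # prefix sums down the first column
for i in range(1, len(grid)):
    for j in range(1, len(grid[0])):
        grid[i][j] += min(grid[i - 1][j], grid[i][j - 1])
assert grid[-1][-1] == 7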
import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class lowerCamelCase_ : '''simple docstring''' def __init__( self : str , __lowerCamelCase : str = "cpu" , __lowerCamelCase : str = "openai/clip-vit-large-patch14" ) -> None: A : int = device A : Optional[Any] = CLIPTokenizerFast.from_pretrained(__lowerCamelCase ) A : Optional[int] = [0.48145466, 0.4578275, 0.40821073] A : Tuple = [0.26862954, 0.26130258, 0.27577711] A : List[Any] = torchvision.transforms.Normalize(self.image_mean , self.image_std ) A : int = torchvision.transforms.Resize(2_24 ) A : Optional[int] = torchvision.transforms.CenterCrop(2_24 ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Optional[int] ) -> str: A : Tuple = self.resize(__lowerCamelCase ) A : Dict = self.center_crop(__lowerCamelCase ) A : Any = self.normalize(__lowerCamelCase ) return images def __call__( self : List[str] , __lowerCamelCase : int=None , __lowerCamelCase : int=None , **__lowerCamelCase : Optional[int] ) -> Union[str, Any]: A : Any = self.tokenizer(text=__lowerCamelCase , **__lowerCamelCase ) A : Optional[Any] = self.preprocess_img(__lowerCamelCase ) A : str = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class lowerCamelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[Any] , __lowerCamelCase : List[str]=10 , __lowerCamelCase : Tuple=0.01 , __lowerCamelCase : Dict=None , __lowerCamelCase : List[Any]=None , __lowerCamelCase : str=None , __lowerCamelCase : str=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : int=None , __lowerCamelCase : Dict=False , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : str="image" , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : List[str]=False , __lowerCamelCase : str=False , __lowerCamelCase : List[str]=False , ) -> None: super().__init__() A : Optional[Any] = None A : str = device if device else get_device() if vqgan: A : List[str] = vqgan else: A : Dict = load_vqgan(self.device , conf_path=__lowerCamelCase , ckpt_path=__lowerCamelCase ) self.vqgan.eval() if clip: A : Optional[Any] = clip else: A : Dict = CLIPModel.from_pretrained("openai/clip-vit-base-patch32" ) self.clip.to(self.device ) A : Optional[Any] = ProcessorGradientFlow(device=self.device ) A : Dict = iterations A : Tuple = lr A : Tuple = log A : Optional[int] = make_grid A : str = return_val A : List[Any] = quantize A : str = self.vqgan.decoder.z_shape def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Dict=None , __lowerCamelCase : int=5 , __lowerCamelCase : str=True ) -> Union[str, Any]: A : Optional[Any] = [] if output_path is None: A : List[Any] = "./animation.gif" if input_path is None: A : str = self.save_path A : Optional[int] = sorted(glob(input_path + "/*" ) ) if not len(__lowerCamelCase ): raise ValueError( "No images found in save path, aborting (did you pass save_intermediate=True to the generate" " function?)" ) if len(__lowerCamelCase ) == 1: print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)" ) A : Union[str, Any] = total_duration / len(__lowerCamelCase ) A : Optional[Any] = [frame_duration] * 
len(__lowerCamelCase ) if extend_frames: A : int = 1.5 A : int = 3 for file_name in paths: if file_name.endswith(".png" ): images.append(imageio.imread(__lowerCamelCase ) ) imageio.mimsave(__lowerCamelCase , __lowerCamelCase , duration=__lowerCamelCase ) print(F"""gif saved to {output_path}""" ) def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : List[str]=None , __lowerCamelCase : Dict=None ) -> Union[str, Any]: if not (path or img): raise ValueError("Input either path or tensor" ) if img is not None: raise NotImplementedError A : List[str] = preprocess(Image.open(__lowerCamelCase ) , target_image_size=2_56 ).to(self.device ) A : str = preprocess_vqgan(__lowerCamelCase ) A : Optional[Any] = self.vqgan.encode(__lowerCamelCase ) return z def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Optional[Any] ) -> Union[str, Any]: A : int = self.latent.detach().requires_grad_() A : List[str] = base_latent + transform_vector if self.quantize: A : Union[str, Any] = self.vqgan.quantize(__lowerCamelCase ) else: A : str = trans_latent return self.vqgan.decode(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any]=None ) -> Dict: A : Union[str, Any] = self.clip_preprocessor(text=__lowerCamelCase , images=__lowerCamelCase , return_tensors="pt" , padding=__lowerCamelCase ) A : int = self.clip(**__lowerCamelCase ) A : Optional[int] = clip_outputs.logits_per_image if weights is not None: A : Any = similarity_logits * weights return similarity_logits.sum() def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ) -> str: A : Any = self._get_clip_similarity(pos_prompts["prompts"] , __lowerCamelCase , weights=(1 / pos_prompts["weights"]) ) if neg_prompts: A : List[Any] = self._get_clip_similarity(neg_prompts["prompts"] , __lowerCamelCase , weights=neg_prompts["weights"] ) else: A : Tuple = torch.tensor([1] , device=self.device ) A : List[Any] = -torch.log(__lowerCamelCase ) + torch.log(__lowerCamelCase ) return loss def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict ) -> str: A : Any = torch.randn_like(self.latent , requires_grad=__lowerCamelCase , device=self.device ) A : Optional[int] = torch.optim.Adam([vector] , lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() A : List[str] = self._add_vector(__lowerCamelCase ) A : List[Any] = loop_post_process(__lowerCamelCase ) A : Any = self._get_CLIP_loss(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) print("CLIP loss" , __lowerCamelCase ) if self.log: wandb.log({"CLIP Loss": clip_loss} ) clip_loss.backward(retain_graph=__lowerCamelCase ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] ) -> Optional[Any]: wandb.init(reinit=__lowerCamelCase , project="face-editor" ) wandb.config.update({"Positive Prompts": positive_prompts} ) wandb.config.update({"Negative Prompts": negative_prompts} ) wandb.config.update({"lr": self.lr, "iterations": self.iterations} ) if image_path: A : List[str] = Image.open(__lowerCamelCase ) A : Optional[Any] = image.resize((2_56, 2_56) ) wandb.log("Original Image" , wandb.Image(__lowerCamelCase ) ) def SCREAMING_SNAKE_CASE__ ( self : 
Optional[int] , __lowerCamelCase : Optional[Any] ) -> List[Any]: if not prompts: return [] A : Optional[Any] = [] A : int = [] if isinstance(__lowerCamelCase , __lowerCamelCase ): A : int = [prompt.strip() for prompt in prompts.split("|" )] for prompt in prompts: if isinstance(__lowerCamelCase , (tuple, list) ): A : List[Any] = prompt[0] A : Optional[Any] = float(prompt[1] ) elif ":" in prompt: A : Optional[Any] = prompt.split(":" ) A : Union[str, Any] = float(__lowerCamelCase ) else: A : Tuple = prompt A : Optional[int] = 1.0 processed_prompts.append(__lowerCamelCase ) weights.append(__lowerCamelCase ) return { "prompts": processed_prompts, "weights": torch.tensor(__lowerCamelCase , device=self.device ), } def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[Any]=False , __lowerCamelCase : int=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : Optional[int]=None , ) -> Optional[int]: if image_path: A : str = self._get_latent(__lowerCamelCase ) else: A : List[Any] = torch.randn(self.latent_dim , device=self.device ) if self.log: self._init_logging(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) assert pos_prompts, "You must provide at least one positive prompt." A : List[str] = self.process_prompts(__lowerCamelCase ) A : Dict = self.process_prompts(__lowerCamelCase ) if save_final and save_path is None: A : Any = os.path.join("./outputs/" , "_".join(pos_prompts["prompts"] ) ) if not os.path.exists(__lowerCamelCase ): os.makedirs(__lowerCamelCase ) else: A : List[Any] = save_path + "_" + get_timestamp() os.makedirs(__lowerCamelCase ) A : Dict = save_path A : List[str] = self.vqgan.decode(self.latent )[0] if show_intermediate: print("Original Image" ) show_pil(custom_to_pil(__lowerCamelCase ) ) A : Union[str, Any] = loop_post_process(__lowerCamelCase ) for iter, transformed_img in enumerate(self._optimize_CLIP(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) ): if show_intermediate: show_pil(__lowerCamelCase ) if save_intermediate: transformed_img.save(os.path.join(self.save_path , F"""iter_{iter:03d}.png""" ) ) if self.log: wandb.log({"Image": wandb.Image(__lowerCamelCase )} ) if show_final: show_pil(__lowerCamelCase ) if save_final: transformed_img.save(os.path.join(self.save_path , F"""iter_{iter:03d}_final.png""" ) )
707
import os # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_doctest_list.py __SCREAMING_SNAKE_CASE = """.""" if __name__ == "__main__": __SCREAMING_SNAKE_CASE = os.path.join(REPO_PATH, """utils/documentation_tests.txt""") __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] with open(doctest_file_path) as fp: for line in fp: __SCREAMING_SNAKE_CASE = line.strip() __SCREAMING_SNAKE_CASE = os.path.join(REPO_PATH, line) if not (os.path.isfile(path) or os.path.isdir(path)): non_existent_paths.append(line) all_paths.append(path) if len(non_existent_paths) > 0: __SCREAMING_SNAKE_CASE = """\n""".join(non_existent_paths) raise ValueError(F"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""") if all_paths != sorted(all_paths): raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""")
17
0
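The editor class in this record parses weighted prompts by splitting on "|" with an optional ":weight" suffix per prompt; a standalone mirror of that parsing (no VQGAN or CLIP needed) behaves like this:

def parse_prompts(prompts):
    processed, weights = [], []
    for prompt in (p.strip() for p in prompts.split("|")):
        if ":" in prompt:                     # optional per-prompt weight
            text, weight = prompt.split(":")
            processed.append(text)
            weights.append(float(weight))
        else:
            processed.append(prompt)
            weights.append(1.0)
    return processed, weights

assert parse_prompts("blue eyes:1.2|red hair:0.8") == (["blue eyes", "red hair"], [1.2, 0.8])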
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class lowerCamelCase_ ( _A ,unittest.TestCase ): '''simple docstring''' a__ = ShapEPipeline a__ = ["prompt"] a__ = ["prompt"] a__ = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] a__ = False @property def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Any: return 32 @property def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: return 32 @property def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[Any]: return self.time_input_dim * 4 @property def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> int: return 8 @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple: A : str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) return tokenizer @property def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> int: torch.manual_seed(0 ) A : Any = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModelWithProjection(__lowerCamelCase ) @property def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: torch.manual_seed(0 ) A : Tuple = { "num_attention_heads": 2, "attention_head_dim": 16, "embedding_dim": self.time_input_dim, "num_embeddings": 32, "embedding_proj_dim": self.text_embedder_hidden_size, "time_embed_dim": self.time_embed_dim, "num_layers": 1, "clip_embed_dim": self.time_input_dim * 2, "additional_embeddings": 0, "time_embed_act_fn": "gelu", "norm_in_type": "layer", "encoder_hid_proj_type": None, "added_emb_type": None, } A : List[Any] = PriorTransformer(**__lowerCamelCase ) return model @property def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: torch.manual_seed(0 ) A : Dict = { "param_shapes": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), "d_latent": self.time_input_dim, "d_hidden": self.renderer_dim, "n_output": 12, "background": ( 0.1, 0.1, 0.1, ), } A : Optional[Any] = ShapERenderer(**__lowerCamelCase ) return model def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]: A : List[str] = self.dummy_prior A : Dict = self.dummy_text_encoder A : Optional[Any] = self.dummy_tokenizer A : Union[str, Any] = self.dummy_renderer A : str = HeunDiscreteScheduler( beta_schedule="exp" , num_train_timesteps=10_24 , prediction_type="sample" , use_karras_sigmas=__lowerCamelCase , clip_sample=__lowerCamelCase , clip_sample_range=1.0 , ) A : Union[str, Any] = { "prior": prior, "text_encoder": text_encoder, "tokenizer": tokenizer, "renderer": renderer, "scheduler": scheduler, } return components def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[int]=0 ) -> List[Any]: if str(__lowerCamelCase ).startswith("mps" ): A : Dict = torch.manual_seed(__lowerCamelCase ) else: A : List[Any] = torch.Generator(device=__lowerCamelCase 
).manual_seed(__lowerCamelCase ) A : List[Any] = { "prompt": "horse", "generator": generator, "num_inference_steps": 1, "frame_size": 32, "output_type": "np", } return inputs def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str: A : List[Any] = "cpu" A : Optional[Any] = self.get_dummy_components() A : int = self.pipeline_class(**__lowerCamelCase ) A : List[Any] = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) A : Optional[int] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) ) A : str = output.images[0] A : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) A : int = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple: A : List[Any] = torch_device == "cpu" A : Optional[int] = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=__lowerCamelCase , relax_max_difference=__lowerCamelCase , ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]: A : str = self.get_dummy_components() A : Tuple = self.pipeline_class(**__lowerCamelCase ) A : int = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) A : List[Any] = 1 A : Tuple = 2 A : str = self.get_dummy_inputs(__lowerCamelCase ) for key in inputs.keys(): if key in self.batch_params: A : str = batch_size * [inputs[key]] A : Union[str, Any] = pipe(**__lowerCamelCase , num_images_per_prompt=__lowerCamelCase )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[str]: A : Tuple = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/test_shap_e_np_out.npy" ) A : str = ShapEPipeline.from_pretrained("openai/shap-e" ) A : List[str] = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) A : List[str] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 ) A : Dict = pipe( "a shark" , generator=__lowerCamelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
708
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class lowerCamelCase_ : '''simple docstring''' def __init__( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Tuple=13 , __lowerCamelCase : List[str]=30 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : Any=3 , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : int=32 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Any=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=2 , ) -> str: A : List[Any] = parent A : Optional[int] = batch_size A : Any = image_size A : Optional[Any] = patch_size A : Optional[Any] = num_channels A : Tuple = is_training A : Optional[Any] = use_labels A : Union[str, Any] = hidden_size A : Tuple = num_hidden_layers A : Union[str, Any] = num_attention_heads A : Union[str, Any] = intermediate_size A : Any = hidden_act A : Tuple = hidden_dropout_prob A : Dict = attention_probs_dropout_prob A : Any = type_sequence_label_size A : Tuple = initializer_range A : List[Any] = scope A : Optional[int] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) A : List[str] = (image_size // patch_size) ** 2 A : List[str] = num_patches + 2 def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int: A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : List[Any] = None if self.use_labels: A : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Dict = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str] ) -> int: A : Optional[int] 
= DeiTModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : List[Any] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Any ) -> Any: A : List[Any] = DeiTForMaskedImageModeling(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A : List[str] = 1 A : Optional[int] = DeiTForMaskedImageModeling(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : int ) -> Dict: A : str = self.type_sequence_label_size A : List[str] = DeiTForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A : Any = 1 A : str = DeiTForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: A : Dict = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ) : Tuple = config_and_inputs A : int = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) a__ = ( { "feature-extraction": DeiTModel, "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]: A : str = DeiTModelTester(self ) A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict: self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int: pass def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: A , A : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Dict = model_class(__lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]: A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Union[str, Any] 
= model_class(__lowerCamelCase ) A : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Any = [*signature.parameters.keys()] A : Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any: A : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any]=False ) -> str: A : Union[str, Any] = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: if not self.model_tester.is_training: return A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() A : Dict = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(__lowerCamelCase ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue A : Union[str, Any] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Dict = model(**__lowerCamelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return A : Tuple = False A : Any = True for model_class in self.all_model_classes: if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue A : List[str] = model_class(__lowerCamelCase ) model.gradient_checkpointing_enable() model.to(__lowerCamelCase ) model.train() A : Dict = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Tuple = model(**__lowerCamelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() A : int = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(__lowerCamelCase ), *get_values(__lowerCamelCase ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ): A : Tuple = problem_type["title"] A : Optional[Any] = 
problem_type["num_labels"] A : List[str] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if problem_type["num_labels"] > 1: A : List[str] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) A : int = inputs["labels"].to(problem_type["dtype"] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=__lowerCamelCase ) as warning_list: A : Optional[Any] = model(**__lowerCamelCase ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Optional[Any] = DeiTModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: A : Optional[int] = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to( __lowerCamelCase ) A : List[Any] = self.default_image_processor A : List[Any] = prepare_img() A : Optional[Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): A : List[str] = model(**__lowerCamelCase ) # verify the logits A : str = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : List[str] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: A : str = DeiTModel.from_pretrained( "facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" ) A : Dict = self.default_image_processor A : Optional[int] = prepare_img() A : Union[str, Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ) A : Union[str, Any] = inputs.pixel_values.to(__lowerCamelCase ) # forward pass to make sure inference works in fp16 with torch.no_grad(): A : List[str] = model(__lowerCamelCase )
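# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the test suite above): classifying a single
# image with the distilled DeiT checkpoint that the integration tests exercise.
# Assumes `torch`, `transformers` and `PIL` are installed; the image path is the
# COCO fixture referenced by `prepare_img` above.
import torch
from PIL import Image
from transformers import DeiTForImageClassificationWithTeacher, DeiTImageProcessor

processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])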
from diffusers.utils.testing_utils import require_onnxruntime


@require_onnxruntime
class lowerCamelCase_:
    '''simple docstring'''

    pass
from sklearn.metrics import recall_score import datasets __SCREAMING_SNAKE_CASE = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ __SCREAMING_SNAKE_CASE = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. 
>>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ __SCREAMING_SNAKE_CASE = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("int32" ) ), "references": datasets.Sequence(datasets.Value("int32" ) ), } if self.config_name == "multilabel" else { "predictions": datasets.Value("int32" ), "references": datasets.Value("int32" ), } ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , ) def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Tuple="binary" , __lowerCamelCase : Tuple=None , __lowerCamelCase : Tuple="warn" , ) -> Optional[Any]: A : str = recall_score( __lowerCamelCase , __lowerCamelCase , labels=__lowerCamelCase , pos_label=__lowerCamelCase , average=__lowerCamelCase , sample_weight=__lowerCamelCase , zero_division=__lowerCamelCase , ) return {"recall": float(__lowerCamelCase ) if score.size == 1 else score}
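# Hedged sketch of the `zero_division` behaviour documented above, shown
# directly with scikit-learn's `recall_score` (the function this metric wraps).
# With no positive examples in the references, recall is 0/0:
from sklearn.metrics import recall_score

refs, preds = [0, 0], [0, 0]
print(recall_score(refs, preds))                   # 0.0, plus an UndefinedMetricWarning under the default 'warn'
print(recall_score(refs, preds, zero_division=1))  # 1.0, no warning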
from __future__ import annotations


def median_of_two_arrays(numsa: list[float], numsb: list[float]) -> float:
    """Return the median of the two arrays merged together.

    >>> median_of_two_arrays([1, 2], [3])
    2
    >>> median_of_two_arrays([0, -1.1], [2.5, 1])
    0.5
    """
    all_numbers = sorted(numsa + numsb)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(F"""The median of two arrays is: {median_of_two_arrays(array_a, array_b)}""")
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Each slot chains its values in a deque; new entries go onto the front.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
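# Hedged, self-contained sketch of the chaining idea used above, without the
# `HashTable` base class (which lives in the sibling `hash_table` module):
# every slot holds a deque, and new values are pushed onto the front.
from collections import deque

slots = [None] * 3
key = 1  # in the real class this index comes from the hash function
slots[key] = deque([]) if slots[key] is None else slots[key]
slots[key].appendleft("alpha")
slots[key].appendleft("beta")
print(slots[key])  # deque(['beta', 'alpha'])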
import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""") @require_sentencepiece @require_tokenizers class lowerCamelCase_ ( _A ,unittest.TestCase ): '''simple docstring''' a__ = GPTSwaTokenizer a__ = False a__ = True a__ = False def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Tuple: super().setUp() # We have a SentencePiece fixture for testing A : Optional[Any] = GPTSwaTokenizer(__lowerCamelCase , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : Optional[Any] ) -> Optional[Any]: A : Any = "This is a test" A : Optional[int] = "This is a test" return input_text, output_text def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: A : Optional[Any] = "<s>" A : Tuple = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[int]: A : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(__lowerCamelCase ) , 20_00 ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: self.assertEqual(self.get_tokenizer().vocab_size , 20_00 ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any: A : Optional[int] = GPTSwaTokenizer(__lowerCamelCase ) A : Optional[int] = tokenizer.tokenize("This is a test" ) self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [4_65, 2_87, 2_65, 6_31, 8_42] ) A : Tuple = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) # fmt: off self.assertListEqual( __lowerCamelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , ) # fmt: on A : List[str] = tokenizer.convert_tokens_to_ids(__lowerCamelCase ) self.assertListEqual( __lowerCamelCase , [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] , ) A : Union[str, Any] = tokenizer.convert_ids_to_tokens(__lowerCamelCase ) # fmt: off self.assertListEqual( __lowerCamelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] ) # fmt: on def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Tuple: A : Optional[Any] = GPTSwaTokenizer(__lowerCamelCase ) A : Dict = ["This is a test", "I was born in 92000, and this is falsé."] A : Dict = [ [4_65, 2_87, 2_65, 6_31, 8_42], [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(__lowerCamelCase , __lowerCamelCase ): self.assertListEqual(tokenizer.encode_fast(__lowerCamelCase ) , __lowerCamelCase ) # Test that decode_fast returns the input text for text, token_ids in zip(__lowerCamelCase , __lowerCamelCase ): self.assertEqual(tokenizer.decode_fast(__lowerCamelCase ) , __lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any: A : List[str] = [ "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')", "Hey there, how are you doing this fine day?", "This is a text with a trailing spaces followed by a dot .", "Häj sväjs lillebrör! =)", "Det är inget fel på Mr. Cool", ] # fmt: off A : Dict = {"input_ids": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowerCamelCase , model_name="AI-Sweden/gpt-sw3-126m" , sequences=__lowerCamelCase , )
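# Hedged usage sketch: the `encode_fast`/`decode_fast` round trip that the test
# above checks, run against the released checkpoint named in the integration
# test (requires `sentencepiece`; class name as used in this file).
from transformers import GPTSwaTokenizer

tokenizer = GPTSwaTokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
ids = tokenizer.encode_fast("Det är inget fel på Mr. Cool")
print(ids)
print(tokenizer.decode_fast(ids))  # round-trips to the input text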
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class lowerCamelCase_ : '''simple docstring''' @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str: return self.get_dummy_input() @property def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : int=False , __lowerCamelCase : int=False , __lowerCamelCase : Optional[int]=False , ) -> Dict: A : Optional[Any] = 4 A : List[str] = 32 A : Any = (32, 32) A : str = torch.manual_seed(0 ) A : int = torch.device(__lowerCamelCase ) A : List[str] = (batch_size, num_channels) + sizes A : Dict = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase ) A : int = {"hidden_states": hidden_states} if include_temb: A : Any = 1_28 A : List[str] = randn_tensor((batch_size, temb_channels) , generator=__lowerCamelCase , device=__lowerCamelCase ) if include_res_hidden_states_tuple: A : str = torch.manual_seed(1 ) A : Tuple = (randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase ),) if include_encoder_hidden_states: A : Dict = floats_tensor((batch_size, 32, 32) ).to(__lowerCamelCase ) if include_skip_sample: A : Optional[int] = randn_tensor(((batch_size, 3) + sizes) , generator=__lowerCamelCase , device=__lowerCamelCase ) return dummy_input def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]: A : Dict = { "in_channels": 32, "out_channels": 32, "temb_channels": 1_28, } if self.block_type == "up": A : Dict = 32 if self.block_type == "mid": init_dict.pop("out_channels" ) A : str = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Optional[int] ) -> Union[str, Any]: A , A : str = self.prepare_init_args_and_inputs_for_common() A : List[Any] = self.block_class(**__lowerCamelCase ) unet_block.to(__lowerCamelCase ) unet_block.eval() with torch.no_grad(): A : int = unet_block(**__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): A : Union[str, Any] = output[0] self.assertEqual(output.shape , self.output_shape ) A : Any = output[0, -1, -3:, -3:] A : Union[str, Any] = torch.tensor(__lowerCamelCase ).to(__lowerCamelCase ) assert torch_all_close(output_slice.flatten() , __lowerCamelCase , atol=5e-3 ) @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: A , A : Tuple = self.prepare_init_args_and_inputs_for_common() A : str = self.block_class(**__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : Optional[int] = model(**__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): A : Optional[Any] = output[0] A : List[str] = torch.device(__lowerCamelCase ) A : List[str] = randn_tensor(output.shape , device=__lowerCamelCase ) A : Dict = torch.nn.functional.mse_loss(__lowerCamelCase , __lowerCamelCase ) loss.backward()
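# Hedged sketch: how a concrete block test would plug into the mixin defined
# above, mirroring the pattern used in diffusers' own test suite. The block
# class, import path, and test-class name are illustrative.
import unittest

from diffusers.models.unet_2d_blocks import DownBlock2D


class DownBlockTests(lowerCamelCase_, unittest.TestCase):
    # The mixin reads these attributes to build dummy inputs and expected shapes.
    block_class = DownBlock2D
    block_type = "down"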
from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class lowerCamelCase_ : '''simple docstring''' def __init__( self : Dict , __lowerCamelCase : List[str] , ) -> Optional[int]: A : List[Any] = parent A : str = 13 A : Optional[int] = 7 A : int = True A : Any = True A : List[Any] = True A : Any = 99 A : int = 32 A : Tuple = 2 A : Union[str, Any] = 4 A : int = 37 A : Optional[int] = "gelu" A : Tuple = 0.1 A : Dict = 0.1 A : str = 5_12 A : Tuple = 16 A : Tuple = 2 A : str = 0.02 A : int = 3 A : Optional[int] = 4 A : Dict = None def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any: A : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Tuple = None if self.use_input_mask: A : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) A : Any = None A : Dict = None A : Union[str, Any] = None if self.use_labels: A : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) A : Optional[Any] = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[Any]: ( A ) : List[str] = self.prepare_config_and_inputs() A : str = True A : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ) -> Optional[Any]: A : Tuple = TFEsmModel(config=__lowerCamelCase ) A : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask} A : Optional[int] = model(__lowerCamelCase ) A : List[Any] = [input_ids, input_mask] A : List[Any] = model(__lowerCamelCase ) A : Optional[Any] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , 
__lowerCamelCase : List[str] , ) -> List[Any]: A : Dict = True A : Union[str, Any] = TFEsmModel(config=__lowerCamelCase ) A : Optional[int] = { "input_ids": input_ids, "attention_mask": input_mask, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, } A : str = model(__lowerCamelCase ) A : Tuple = [input_ids, input_mask] A : Union[str, Any] = model(__lowerCamelCase , encoder_hidden_states=__lowerCamelCase ) # Also check the case where encoder outputs are not passed A : str = model(__lowerCamelCase , attention_mask=__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] ) -> Dict: A : Dict = TFEsmForMaskedLM(config=__lowerCamelCase ) A : Any = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : int ) -> int: A : Any = self.num_labels A : Tuple = TFEsmForTokenClassification(config=__lowerCamelCase ) A : Dict = {"input_ids": input_ids, "attention_mask": input_mask} A : int = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> int: A : int = self.prepare_config_and_inputs() ( A ) : Tuple = config_and_inputs A : int = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) a__ = ( { "feature-extraction": TFEsmModel, "fill-mask": TFEsmForMaskedLM, "text-classification": TFEsmForSequenceClassification, "token-classification": TFEsmForTokenClassification, "zero-shot": TFEsmForSequenceClassification, } if is_tf_available() else {} ) a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: A : Any = TFEsmModelTester(self ) A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]: A : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]: A : int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> str: A : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]: A : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : str ) -> int: for model_name in 
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Tuple = TFEsmModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @unittest.skip("Protein models do not support embedding resizing." ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]: pass @unittest.skip("Protein models do not support embedding resizing." ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: pass def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: A : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Union[str, Any] = model_class(__lowerCamelCase ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer A : Union[str, Any] = model.get_bias() assert isinstance(__lowerCamelCase , __lowerCamelCase ) for k, v in name.items(): assert isinstance(__lowerCamelCase , tf.Variable ) else: A : List[Any] = model.get_output_embeddings() assert x is None A : List[Any] = model.get_bias() assert name is None @require_tf class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[int]: A : Any = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" ) A : List[str] = tf.constant([[0, 1, 2, 3, 4, 5]] ) A : Any = model(__lowerCamelCase )[0] A : int = [1, 6, 33] self.assertEqual(list(output.numpy().shape ) , __lowerCamelCase ) # compare the actual values for a slice. A : Union[str, Any] = tf.constant( [ [ [8.921518, -10.589814, -6.4671307], [-6.3967156, -13.911377, -1.1211915], [-7.781247, -13.951557, -3.740592], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int: A : Optional[int] = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" ) A : int = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) A : Optional[Any] = model(__lowerCamelCase )[0] # compare the actual values for a slice. A : Dict = tf.constant( [ [ [0.14443092, 0.54125327, 0.3247739], [0.30340484, 0.00526676, 0.31077722], [0.32278043, -0.24987096, 0.3414628], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
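# Hedged usage sketch: masked-token prediction with the ESM-2 checkpoint used
# by the integration tests above. The protein sequence is illustrative.
import tensorflow as tf
from transformers import AutoTokenizer, TFEsmForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

inputs = tokenizer("MKTAYIAKQR<mask>ISFVKSHFSRQLEERLGLIEVQ", return_tensors="tf")
logits = model(**inputs).logits
mask_pos = int(tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0][0])
predicted_id = int(tf.argmax(logits[0, mask_pos]))
print(tokenizer.decode([predicted_id]))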
import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs (`Dict[str, Any]`, *optional*): Additional stopping criteria specific kwargs. Return: `bool`. `False` indicates we should continue, `True` indicates we should stop. """ class lowerCamelCase_ ( _A ): '''simple docstring''' @add_start_docstrings(__lowerCamelCase ) def __call__( self : Optional[int] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Optional[Any] ) -> bool: raise NotImplementedError("StoppingCriteria needs to be subclassed" ) class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] = None ) -> List[Any]: A : str = max_length A : Optional[int] = max_position_embeddings @add_start_docstrings(__lowerCamelCase ) def __call__( self : List[str] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Any ) -> bool: A : List[Any] = input_ids.shape[-1] A : Any = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( "This is a friendly reminder - the current text generation call will exceed the model's predefined " F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """ "exceptions, performance degradation, or nothing at all." ) return is_done class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : int ) -> List[Any]: warnings.warn( "The class `MaxNewTokensCriteria` is deprecated. " F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """ "with `max_length = start_length + max_new_tokens` instead." 
, __lowerCamelCase , ) A : str = start_length A : Optional[Any] = max_new_tokens A : Dict = start_length + max_new_tokens @add_start_docstrings(__lowerCamelCase ) def __call__( self : int , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool: return input_ids.shape[-1] >= self.max_length class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : Optional[int] , __lowerCamelCase : float , __lowerCamelCase : Optional[float] = None ) -> List[Any]: A : str = max_time A : Dict = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(__lowerCamelCase ) def __call__( self : Any , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool: return time.time() - self.initial_timestamp > self.max_time class lowerCamelCase_ ( _A ): '''simple docstring''' @add_start_docstrings(__lowerCamelCase ) def __call__( self : Union[str, Any] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : int ) -> bool: return any(criteria(__lowerCamelCase , __lowerCamelCase ) for criteria in self ) @property def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]: for stopping_criterium in self: if isinstance(__lowerCamelCase , __lowerCamelCase ): return stopping_criterium.max_length elif isinstance(__lowerCamelCase , __lowerCamelCase ): return stopping_criterium.max_length return None def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : Optional[int] = stopping_criteria.max_length A : Any = deepcopy(_lowerCamelCase ) if stopping_max_length is not None and stopping_max_length != max_length: warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , _lowerCamelCase ) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=_lowerCamelCase ) ) return new_stopping_criteria
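# Hedged usage sketch: wiring the criteria defined above into `generate`.
# The checkpoint name and prompt are illustrative.
import torch
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    MaxLengthCriteria,
    MaxTimeCriteria,
    StoppingCriteriaList,
)

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer("The quick brown fox", return_tensors="pt")

# Stop at 20 total tokens or after 5 seconds, whichever comes first.
criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
with torch.no_grad():
    output_ids = model.generate(**inputs, stopping_criteria=criteria, do_sample=False)
print(tokenizer.decode(output_ids[0]))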
import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """
    Copy/paste/tweak the original checkpoint's weights into our RobertaPreLayerNorm structure.
    """
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=checkpoint_repo, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
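# Hedged invocation sketch (the script filename and output path are
# illustrative, not taken from the source above):
#
#   python convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm-dump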
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of the given function using the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
    # Find root of polynomial
    # Find fourth Root of 5
    print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 + 5j)}""")
    # Find value of e
    print(
        """The root of log(y) - 1 = 0 is """,
        F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
    )
    # Exponential Roots
    print(
        """The root of exp(x) - 1 = 0 is""",
        F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
    )
    # Find root of cos(x); start at 2, since the derivative of cos vanishes at 0
    print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 2)}""")
import datasets


_CITATION = """\
@InProceedings{conneau2018xnli,
  author = "Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin",
  title = "XNLI: Evaluating Cross-lingual Sentence Representations",
  booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
  year = "2018",
  publisher = "Association for Computational Linguistics",
  location = "Brussels, Belgium",
}
"""

_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""

_KWARGS_DESCRIPTION = """
Computes XNLI score which is just simple accuracy.
Args:
    predictions: Predicted labels.
    references: Ground truth labels.
Returns:
    'accuracy': accuracy
Examples:

    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> xnli_metric = datasets.load_metric("xnli")
    >>> results = xnli_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}
"""


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} __SCREAMING_SNAKE_CASE = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } __SCREAMING_SNAKE_CASE = { """allenai/led-base-16384""": 16384, } class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = LEDTokenizer a__ = ["input_ids", "attention_mask"] def __init__( self : int , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : str="replace" , __lowerCamelCase : Optional[int]="<s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Dict="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : Union[str, Any]="<pad>" , __lowerCamelCase : Dict="<mask>" , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : str=True , **__lowerCamelCase : Union[str, Any] , ) -> Optional[int]: super().__init__( __lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase , **__lowerCamelCase , ) A : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space: A : Any = getattr(__lowerCamelCase , pre_tok_state.pop("type" ) ) A : Any = add_prefix_space A : Tuple = pre_tok_class(**__lowerCamelCase ) A : str = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` A : List[str] = "post_processor" A : Union[str, Any] = getattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) if tokenizer_component_instance: A : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A : Union[str, Any] = tuple(state["sep"] ) if "cls" in state: A : str = tuple(state["cls"] ) A : int = False if state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space: A : List[Any] = add_prefix_space A : Dict = True if state.get("trim_offsets" , __lowerCamelCase ) != trim_offsets: A : Dict = trim_offsets A : str = True if changes_to_apply: A : int = getattr(__lowerCamelCase , state.pop("type" ) ) A : Dict = component_class(**__lowerCamelCase ) setattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) @property # Copied from 
transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Any ) -> Dict: A : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else value A : Tuple = value def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[str] ) -> BatchEncoding: A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , *__lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ) -> BatchEncoding: A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]: A : Optional[Any] = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase ) return tuple(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any]=None ) -> List[str]: A : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: A : str = [self.sep_token_id] A : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ) -> dict: A : Dict = super()._pad( encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: A : List[Any] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: A : Optional[int] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
A : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase ) if needs_to_be_padded: A : Any = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` A : Tuple = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": A : Tuple = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
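# Hedged sketch of the `global_attention_mask` padding implemented by `_pad`
# above: on the padded side the mask is extended with -1 ("local attention").
# The checkpoint name comes from this file's pretrained map; the text is
# illustrative.
from transformers import LEDTokenizerFast

tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
enc = tokenizer("a short document")
enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)  # global attention on <s> only

padded = tokenizer.pad(enc, padding="max_length", max_length=12)
print(padded["global_attention_mask"])  # original mask followed by -1s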
'''simple docstring''' import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple=13 , __lowerCamelCase : Any=7 , __lowerCamelCase : Tuple=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : Any=True , __lowerCamelCase : int=99 , __lowerCamelCase : Optional[Any]=32 , __lowerCamelCase : int=5 , __lowerCamelCase : List[str]=4 , __lowerCamelCase : Union[str, Any]=37 , __lowerCamelCase : Any="gelu" , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Tuple=5_12 , __lowerCamelCase : Any=16 , __lowerCamelCase : Any=2 , __lowerCamelCase : Any=0.02 , __lowerCamelCase : Any=4 , ) -> Optional[Any]: A : Any = parent A : List[str] = batch_size A : List[Any] = seq_length A : Union[str, Any] = is_training A : Union[str, Any] = use_attention_mask A : Dict = use_token_type_ids A : List[str] = use_labels A : Tuple = vocab_size A : str = hidden_size A : Dict = num_hidden_layers A : Optional[Any] = num_attention_heads A : Optional[Any] = intermediate_size A : Union[str, Any] = hidden_act A : Union[str, Any] = hidden_dropout_prob A : List[Any] = attention_probs_dropout_prob A : List[str] = max_position_embeddings A : str = type_vocab_size A : Any = type_sequence_label_size A : Any = initializer_range A : int = num_choices def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict: A : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Optional[int] = None if self.use_attention_mask: A : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) A : str = None if self.use_token_type_ids: A : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A : str = RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]: A : int = self.prepare_config_and_inputs() A : Dict = config_and_inputs A : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]: A : Dict = self.prepare_config_and_inputs() A : Tuple = config_and_inputs A : Union[str, Any] = True A : List[Any] = 
floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class lowerCamelCase_ ( _A ,unittest.TestCase ): '''simple docstring''' a__ = True a__ = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]: A : int = FlaxRobertaPreLayerNormModelTester(self ) @slow def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any: for model_class_name in self.all_model_classes: A : str = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=__lowerCamelCase ) A : List[str] = model(np.ones((1, 1) ) ) self.assertIsNotNone(__lowerCamelCase ) @require_flax class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Dict: A : str = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=__lowerCamelCase ) A : Tuple = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa ) A : Union[str, Any] = model(__lowerCamelCase )[0] A : Optional[int] = [1, 11, 5_02_65] self.assertEqual(list(output.shape ) , __lowerCamelCase ) # compare the actual values for a slice. A : Dict = np.array( [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , __lowerCamelCase , atol=1e-4 ) ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: A : str = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=__lowerCamelCase ) A : List[Any] = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa ) A : List[Any] = model(__lowerCamelCase )[0] # compare the actual values for a slice. A : List[Any] = np.array( [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , __lowerCamelCase , atol=1e-4 ) )
715
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Sequence, Value from .base import TaskTemplate @dataclass(frozen=True ) class lowerCamelCase_ ( TaskTemplate ): '''simple docstring''' # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization task = field(default="question-answering-extractive" ,metadata={"include_in_asdict_even_if_is_default": True} ) input_schema = Features({"question": Value("string" ), "context": Value("string" )} ) label_schema = Features( { "answers": Sequence( { "text": Value("string" ), "answer_start": Value("int32" ), } ) } ) question_column = "question" context_column = "context" answers_column = "answers" @property def column_mapping( self : Union[str, Any] ) -> Dict[str, str]: return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
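A short usage sketch for the template above; upstream in `datasets` the class is exported as `QuestionAnsweringExtractive`, which is assumed here:

from datasets.tasks import QuestionAnsweringExtractive

# Custom column names are remapped onto the canonical question/context/answers schema.
task = QuestionAnsweringExtractive(question_column="query", context_column="passage")
print(task.column_mapping)  # {'query': 'question', 'passage': 'context', 'answers': 'answers'}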
17
0
ENERGY_CONVERSION = { """joule""": 1.0, """kilojoule""": 1000, """megajoule""": 1000000, """gigajoule""": 1000000000, """wattsecond""": 1.0, """watthour""": 3600, """kilowatthour""": 3600000, """newtonmeter""": 1.0, """calorie_nutr""": 4186.8, """kilocalorie_nutr""": 4_186_800.00, """electronvolt""": 1.602_176_634e-19, """britishthermalunit_it""": 1055.05585, """footpound""": 1.355818, } def UpperCAmelCase ( from_type , to_type , value ): if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: A : List[Any] = ( f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n""" f"""Valid values are: {", ".join(ENERGY_CONVERSION )}""" ) raise ValueError(A ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
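A few spot checks for the converter above; the expected values follow directly from the table:

# joule is the base unit, so these are exact.
assert UpperCAmelCase("joule", "kilojoule", 1000) == 1.0
assert UpperCAmelCase("kilowatthour", "joule", 1) == 3_600_000.0
try:
    UpperCAmelCase("wattsecond", "banana", 1)
except ValueError as err:
    print(err)  # the message lists the valid unit names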
716
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class lowerCamelCase_ : '''simple docstring''' def __init__( self : int , __lowerCamelCase : Any , __lowerCamelCase : Dict=3 , __lowerCamelCase : Dict=32 , __lowerCamelCase : Any=3 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : str=[8, 16, 32, 64] , __lowerCamelCase : Dict=[1, 1, 2, 1] , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : str=3 , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase : Tuple=[2, 3, 4] , __lowerCamelCase : Any=1 , ) -> int: A : Optional[int] = parent A : List[str] = batch_size A : Tuple = image_size A : List[str] = num_channels A : List[str] = embeddings_size A : List[str] = hidden_sizes A : str = depths A : Optional[Any] = is_training A : int = use_labels A : Optional[int] = hidden_act A : List[Any] = num_labels A : List[str] = scope A : str = len(__lowerCamelCase ) A : Optional[int] = out_features A : str = out_indices A : Optional[int] = num_groups def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: A : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Optional[int] = None if self.use_labels: A : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels ) A : Tuple = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ) -> Optional[int]: A : Any = BitModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : List[Any] = model(__lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : Dict ) -> Tuple: A : Union[str, Any] = self.num_labels A : List[str] = BitForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : str = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ) -> 
List[Any]: A : Dict = BitBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Optional[Any] = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None A : Optional[Any] = None A : Optional[int] = BitBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Any = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Dict: A : List[str] = self.prepare_config_and_inputs() A , A , A : Tuple = config_and_inputs A : Tuple = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () a__ = ( {"feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: A : Any = BitModelTester(self ) A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: return @unittest.skip(reason="Bit does not output attentions" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: pass @unittest.skip(reason="Bit does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]: pass @unittest.skip(reason="Bit does not support input and output embeddings" ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]: pass def SCREAMING_SNAKE_CASE__ ( self : int ) -> str: A , A : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Dict = model_class(__lowerCamelCase ) A : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Optional[Any] = [*signature.parameters.keys()] A : List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]: A : int = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]: A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Optional[int] = model_class(config=__lowerCamelCase ) for name, module in model.named_modules(): if isinstance(__lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: def check_hidden_states_output(__lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] ): A : Dict = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): A : List[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) A : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A : List[Any] = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() A : Dict = ["preactivation", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: A : Dict = layer_type A : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) @unittest.skip(reason="Bit does not use feedforward chunking" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: pass def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]: A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]: for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Optional[Any] = BitModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]: return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: A : Union[str, Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCamelCase ) A : List[Any] = self.default_image_processor A : List[Any] = prepare_img() A : Tuple = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): A : Union[str, Any] = model(**__lowerCamelCase ) # verify the logits A : str = torch.Size((1, 
10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : Optional[Any] = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) @require_torch class lowerCamelCase_ ( _A ,unittest.TestCase ): '''simple docstring''' a__ = (BitBackbone,) if is_torch_available() else () a__ = BitConfig a__ = False def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: A : Union[str, Any] = BitModelTester(self )
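A hedged end-to-end sketch of the classification path exercised by the integration test above; "google/bit-50" is assumed to be the first entry of the archive list:

import torch
from PIL import Image
from transformers import BitImageProcessor, BitForImageClassification

processor = BitImageProcessor.from_pretrained("google/bit-50")
model = BitForImageClassification.from_pretrained("google/bit-50")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000), as asserted in the test
print(model.config.id2label[logits.argmax(-1).item()])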
17
0
'''simple docstring''' # We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation import warnings from .state import AcceleratorState, GradientState warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""") class lowerCamelCase_ : '''simple docstring''' def __init__( self , scheduler , optimizers , step_with_optimizer : bool = True , split_batches : bool = False ) -> None: self.scheduler = scheduler self.optimizers = optimizers if isinstance(optimizers , (list, tuple) ) else [optimizers] self.split_batches = split_batches self.step_with_optimizer = step_with_optimizer self.gradient_state = GradientState() def step( self , *args , **kwargs ) -> None: if not self.step_with_optimizer: # No link between scheduler and optimizer -> just step self.scheduler.step(*args , **kwargs ) return # Otherwise, first make sure the optimizer was stepped. if not self.gradient_state.sync_gradients: if self.gradient_state.adjust_scheduler: self.scheduler._step_count += 1 return for opt in self.optimizers: if opt.step_was_skipped: return if self.split_batches: # Split batches -> the training dataloader batch size is not changed so one step per training step self.scheduler.step(*args , **kwargs ) else: # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do # num_processes steps per training step num_processes = AcceleratorState().num_processes for _ in range(num_processes ): # Special case when using OneCycle and `drop_last` was not used if hasattr(self.scheduler , "total_steps" ): if self.scheduler._step_count <= self.scheduler.total_steps: self.scheduler.step(*args , **kwargs ) else: self.scheduler.step(*args , **kwargs ) def get_last_lr( self ) -> Any: return self.scheduler.get_last_lr() def state_dict( self ) -> dict: return self.scheduler.state_dict() def load_state_dict( self , state_dict ) -> None: self.scheduler.load_state_dict(state_dict ) def get_lr( self ) -> Any: return self.scheduler.get_lr() def print_lr( self , *args , **kwargs ) -> None: return self.scheduler.print_lr(*args , **kwargs )
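In practice this wrapper is created by `accelerator.prepare`; a rough sketch of the intended flow, with a toy model and illustrative hyperparameters:

import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)

# The prepared scheduler only advances when the optimizer really stepped
# (gradients synced, no overflow), so it can be called unconditionally.
optimizer.step()
scheduler.step()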
717
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: model = XLMRobertaModel.from_pretrained("xlm-roberta-base" ) input_ids = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] ) # The dog is cute and lives in the garden house expected_output_shape = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim expected_output_values_last_dim = torch.tensor( [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): output = model(input_ids )["last_hidden_state"].detach() self.assertEqual(output.shape , expected_output_shape ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3 ) ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int: model = XLMRobertaModel.from_pretrained("xlm-roberta-large" ) input_ids = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] ) # The dog is cute and lives in the garden house expected_output_shape = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim expected_output_values_last_dim = torch.tensor( [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): output = model(input_ids )["last_hidden_state"].detach() self.assertEqual(output.shape , expected_output_shape ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3 ) )
17
0
import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class lowerCamelCase_ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , vocab : Dict[str, int] , merges : List[str] , max_length : int = None , pad_token_id : int = None ) -> None: super().__init__() self.pad_token_id = pad_token_id self.max_length = max_length self.vocab = vocab self.merges = merges self.tf_tokenizer = BytePairTokenizer(vocab , merges , sequence_length=max_length ) @classmethod def from_tokenizer( cls , tokenizer : GPTaTokenizer , *args , **kwargs ): merges = [" ".join(m ) for m in tokenizer.bpe_ranks.keys()] vocab = tokenizer.get_vocab() return cls(vocab , merges , *args , **kwargs ) @classmethod def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , *init_inputs , **kwargs ): tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path , *init_inputs , **kwargs ) return cls.from_tokenizer(tokenizer , *init_inputs , **kwargs ) @classmethod def from_config( cls , config ): return cls(**config ) def get_config( self ): return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def call( self , x , max_length : int = None ): input_ids = self.tf_tokenizer(x ) attention_mask = tf.ones_like(input_ids ) if self.pad_token_id is not None: # pad the tokens up to max length max_length = max_length if max_length is not None else self.max_length if max_length is not None: input_ids, attention_mask = pad_model_inputs( input_ids , max_seq_length=max_length , pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids}
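A usage sketch for the layer above (upstream it ships as `TFGPT2Tokenizer`); Hub access is assumed and the padding arguments are illustrative:

import tensorflow as tf
from transformers import GPT2Tokenizer

gpt2 = GPT2Tokenizer.from_pretrained("gpt2")
tokenizer_layer = lowerCamelCase_.from_tokenizer(
    gpt2, max_length=8, pad_token_id=gpt2.eos_token_id
)
batch = tokenizer_layer(tf.constant(["hello world"]))
# Fixed-size int tensors, suitable for compiling into a serving graph.
print(batch["input_ids"].shape, batch["attention_mask"].shape)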
718
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase_ : '''simple docstring''' def __init__( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Optional[int]=10 , __lowerCamelCase : Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase : Tuple=[1, 1, 2, 1] , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , ) -> str: A : Optional[Any] = parent A : Optional[int] = batch_size A : List[str] = image_size A : List[str] = num_channels A : Tuple = embeddings_size A : Optional[int] = hidden_sizes A : Dict = depths A : Optional[int] = is_training A : List[str] = use_labels A : List[Any] = hidden_act A : Optional[int] = num_labels A : int = scope A : List[Any] = len(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[Any]: A : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Optional[Any] = None if self.use_labels: A : Any = ids_tensor([self.batch_size] , self.num_labels ) A : List[Any] = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Tuple: A : List[str] = TFRegNetModel(config=__lowerCamelCase ) A : str = model(__lowerCamelCase , training=__lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Tuple ) -> List[str]: A : List[Any] = self.num_labels A : int = TFRegNetForImageClassification(__lowerCamelCase ) A : str = model(__lowerCamelCase , labels=__lowerCamelCase , training=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: A : Any = self.prepare_config_and_inputs() A , A , A : str = config_and_inputs A : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () a__ = ( {"feature-extraction": TFRegNetModel, 
"image-classification": TFRegNetForImageClassification} if is_tf_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: A : Optional[Any] = TFRegNetModelTester(self ) A : int = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Tuple: return @unittest.skip(reason="RegNet does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) @slow def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: super().test_keras_fit() @unittest.skip(reason="RegNet does not support input and output embeddings" ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Union[str, Any]: pass def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple: A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Union[str, Any] = model_class(__lowerCamelCase ) A : int = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Union[str, Any] = [*signature.parameters.keys()] A : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple: A : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: def check_hidden_states_output(__lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ): A : int = model_class(__lowerCamelCase ) A : int = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) , training=__lowerCamelCase ) A : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A : Dict = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) A , A : int = self.model_tester.prepare_config_and_inputs_for_common() A : str = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: A : List[str] = layer_type A : List[Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: A , A : Tuple = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(__lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]={} ): A : Optional[int] = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ) A : int = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ).to_tuple() def recursive_check(__lowerCamelCase : List[str] , __lowerCamelCase : Any ): if isinstance(__lowerCamelCase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in 
zip(__lowerCamelCase , __lowerCamelCase ): recursive_check(__lowerCamelCase , __lowerCamelCase ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(__lowerCamelCase , __lowerCamelCase ) ) , msg=( "Tuple and dict output are not equal. Difference:" F""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ) , ) recursive_check(__lowerCamelCase , __lowerCamelCase ) for model_class in self.all_model_classes: A : Tuple = model_class(__lowerCamelCase ) A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} ) A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: A : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Union[str, Any] = TFRegNetModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]: A : List[Any] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) A : Optional[int] = self.default_image_processor A : List[Any] = prepare_img() A : str = image_processor(images=__lowerCamelCase , return_tensors="tf" ) # forward pass A : List[Any] = model(**__lowerCamelCase , training=__lowerCamelCase ) # verify the logits A : Dict = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : Optional[Any] = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 )
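The TF integration test above reduces to roughly this inference recipe; "facebook/regnet-y-040" is assumed to be the first archive-list entry:

from PIL import Image
from transformers import AutoImageProcessor, TFRegNetForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="tf")
outputs = model(**inputs, training=False)
print(outputs.logits.shape)  # (1, 1000)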
17
0
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCamelCase_ ( _A ,_A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = StableDiffusionInstructPixaPixPipeline a__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"} a__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS a__ = IMAGE_TO_IMAGE_IMAGE_PARAMS a__ = IMAGE_TO_IMAGE_IMAGE_PARAMS def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[Any]: torch.manual_seed(0 ) A : Union[str, Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) A : str = PNDMScheduler(skip_prk_steps=__lowerCamelCase ) torch.manual_seed(0 ) A : List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) A : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) A : Union[str, Any] = CLIPTextModel(__lowerCamelCase ) A : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) A : List[str] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]=0 ) -> Optional[Any]: A : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] A : Any = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ) if str(__lowerCamelCase ).startswith("mps" ): A : str = torch.manual_seed(__lowerCamelCase ) else: A : Optional[int] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) A : int = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "image_guidance_scale": 1, "output_type": "numpy", } return inputs def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[str]: A : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator A : Any = self.get_dummy_components() A : Tuple = StableDiffusionInstructPixaPixPipeline(**__lowerCamelCase ) A : Union[str, Any] = sd_pipe.to(__lowerCamelCase ) 
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase ) A : int = self.get_dummy_inputs(__lowerCamelCase ) A : List[str] = sd_pipe(**__lowerCamelCase ).images A : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) A : Optional[int] = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[Any]: A : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator A : Tuple = self.get_dummy_components() A : int = StableDiffusionInstructPixaPixPipeline(**__lowerCamelCase ) A : Union[str, Any] = sd_pipe.to(__lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCamelCase ) A : Union[str, Any] = self.get_dummy_inputs(__lowerCamelCase ) A : Tuple = "french fries" A : List[str] = sd_pipe(**__lowerCamelCase , negative_prompt=__lowerCamelCase ) A : Dict = output.images A : str = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) A : Optional[Any] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> int: A : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator A : int = self.get_dummy_components() A : str = StableDiffusionInstructPixaPixPipeline(**__lowerCamelCase ) A : List[Any] = sd_pipe.to(__lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCamelCase ) A : Optional[Any] = self.get_dummy_inputs(__lowerCamelCase ) A : List[Any] = [inputs["prompt"]] * 2 A : Any = np.array(inputs["image"] ).astype(np.floataa ) / 255.0 A : List[str] = torch.from_numpy(__lowerCamelCase ).unsqueeze(0 ).to(__lowerCamelCase ) A : Union[str, Any] = image / 2 + 0.5 A : Tuple = image.permute(0 , 3 , 1 , 2 ) A : int = image.repeat(2 , 1 , 1 , 1 ) A : Union[str, Any] = sd_pipe(**__lowerCamelCase ).images A : Optional[int] = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) A : List[Any] = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]: A : int = "cpu" # ensure determinism for the device-dependent torch.Generator A : Dict = self.get_dummy_components() A : Dict = EulerAncestralDiscreteScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" ) A : str = StableDiffusionInstructPixaPixPipeline(**__lowerCamelCase ) A : str = sd_pipe.to(__lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCamelCase ) A : Optional[int] = self.get_dummy_inputs(__lowerCamelCase ) A : Dict = sd_pipe(**__lowerCamelCase ).images A : List[str] = image[0, -3:, -3:, -1] A : List[Any] = [round(__lowerCamelCase , 4 ) for x in image_slice.flatten().tolist()] print(",".join([str(__lowerCamelCase ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) A : Tuple = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Tuple: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> int: A : Optional[int] = self.get_dummy_components() A : List[Any] = StableDiffusionInstructPixaPixPipeline(**__lowerCamelCase ) A : Union[str, Any] = 
VaeImageProcessor(do_resize=__lowerCamelCase , do_normalize=__lowerCamelCase ) A : Union[str, Any] = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) A : Dict = pipe(**self.get_dummy_inputs_by_type(__lowerCamelCase , input_image_type="pt" ) )[0] A : Any = components["vae"] A : List[Any] = self.get_dummy_inputs_by_type(__lowerCamelCase , input_image_type="pt" ) for image_param in self.image_latents_params: if image_param in inputs.keys(): A : Optional[Any] = vae.encode(inputs[image_param] ).latent_dist.mode() A : Optional[int] = pipe(**__lowerCamelCase )[0] A : Dict = np.abs(out - out_latents_inputs ).max() self.assertLess(__lowerCamelCase , 1e-4 , "passing latents as image input generate different result from passing image" ) @slow @require_torch_gpu class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict: super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : str=0 ) -> List[str]: A : str = torch.manual_seed(__lowerCamelCase ) A : Tuple = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" ) A : List[str] = { "prompt": "turn him into a cyborg", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "image_guidance_scale": 1.0, "output_type": "numpy", } return inputs def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: A : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=__lowerCamelCase ) pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) pipe.enable_attention_slicing() A : Union[str, Any] = self.get_inputs() A : Dict = pipe(**__lowerCamelCase ).images A : List[Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_12, 5_12, 3) A : Any = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Tuple: A : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=__lowerCamelCase ) A : Union[str, Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) pipe.enable_attention_slicing() A : Union[str, Any] = self.get_inputs() A : List[Any] = pipe(**__lowerCamelCase ).images A : List[str] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_12, 5_12, 3) A : Any = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]: A : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=__lowerCamelCase ) A : Union[str, Any] = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) pipe.enable_attention_slicing() A : int = self.get_inputs() A : List[str] = pipe(**__lowerCamelCase ).images A : Tuple = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_12, 5_12, 3) A : int = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : 
List[Any] ) -> Optional[int]: A : List[Any] = 0 def callback_fn(__lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : torch.FloatTensor ) -> None: A : str = True nonlocal number_of_steps number_of_steps += 1 if step == 1: A : str = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) A : Optional[Any] = latents[0, -3:, -3:, -1] A : int = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: A : Optional[int] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) A : Optional[int] = latents[0, -3:, -3:, -1] A : Union[str, Any] = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 A : Optional[int] = False A : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=__lowerCamelCase , torch_dtype=torch.floataa ) A : List[str] = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) pipe.enable_attention_slicing() A : Optional[Any] = self.get_inputs() pipe(**__lowerCamelCase , callback=__lowerCamelCase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Tuple: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() A : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=__lowerCamelCase , torch_dtype=torch.floataa ) A : Optional[int] = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() A : str = self.get_inputs() A : int = pipe(**__lowerCamelCase ) A : Any = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: A : Optional[Any] = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 A : int = inputs["image"].resize((5_04, 5_04) ) A : Optional[Any] = "timbrooks/instruct-pix2pix" A : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained( __lowerCamelCase , safety_checker=__lowerCamelCase , ) pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) pipe.enable_attention_slicing() A : int = pipe(**__lowerCamelCase ) A : Optional[Any] = output.images[0] A : Tuple = image[2_55:2_58, 3_83:3_86, -1] assert image.shape == (5_04, 5_04, 3) A : Dict = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
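Condensed from the slow tests above, a hedged recipe for running the pipeline on a GPU; the step count is illustrative:

import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
)
pipe = pipe.to("cuda")
image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
)
edited = pipe(
    "turn him into a cyborg", image=image,
    num_inference_steps=10, guidance_scale=7.5, image_guidance_scale=1.0,
).images[0]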
719
import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = (PNDMScheduler,) a__ = (("num_inference_steps", 50),) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , **__lowerCamelCase : str ) -> Optional[Any]: A : Union[str, Any] = { "num_train_timesteps": 10_00, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**__lowerCamelCase ) return config def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : List[str]=0 , **__lowerCamelCase : Any ) -> Tuple: A : Dict = dict(self.forward_default_kwargs ) A : Dict = kwargs.pop("num_inference_steps" , __lowerCamelCase ) A : Union[str, Any] = self.dummy_sample A : List[Any] = 0.1 * sample A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A : Any = self.get_scheduler_config(**__lowerCamelCase ) A : int = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals A : Tuple = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCamelCase ) A : Dict = scheduler_class.from_pretrained(__lowerCamelCase ) new_scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals A : Tuple = dummy_past_residuals[:] A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : str = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A : int = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[str] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]: pass def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Optional[Any]=0 , **__lowerCamelCase : Tuple ) -> str: A : List[str] = dict(self.forward_default_kwargs ) A : Optional[int] = kwargs.pop("num_inference_steps" , __lowerCamelCase ) A : List[str] = self.dummy_sample A : Any = 0.1 * sample A : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A : Tuple = self.get_scheduler_config() A : Optional[int] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals (must be after setting timesteps) A : Optional[int] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCamelCase ) A : str = scheduler_class.from_pretrained(__lowerCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residual (must be after setting timesteps) A : Optional[Any] = dummy_past_residuals[:] A : Union[str, Any] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : Dict = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A : 
Union[str, Any] = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[Any] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : Tuple , **__lowerCamelCase : Any ) -> Union[str, Any]: A : Optional[Any] = self.scheduler_classes[0] A : List[Any] = self.get_scheduler_config(**__lowerCamelCase ) A : str = scheduler_class(**__lowerCamelCase ) A : List[str] = 10 A : Union[str, Any] = self.dummy_model() A : int = self.dummy_sample_deter scheduler.set_timesteps(__lowerCamelCase ) for i, t in enumerate(scheduler.prk_timesteps ): A : Optional[int] = model(__lowerCamelCase , __lowerCamelCase ) A : Optional[int] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): A : Tuple = model(__lowerCamelCase , __lowerCamelCase ) A : Tuple = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample return sample def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any: A : Union[str, Any] = dict(self.forward_default_kwargs ) A : Union[str, Any] = kwargs.pop("num_inference_steps" , __lowerCamelCase ) for scheduler_class in self.scheduler_classes: A : Dict = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) A : List[Any] = self.dummy_sample A : List[Any] = 0.1 * sample if num_inference_steps is not None and hasattr(__lowerCamelCase , "set_timesteps" ): scheduler.set_timesteps(__lowerCamelCase ) elif num_inference_steps is not None and not hasattr(__lowerCamelCase , "set_timesteps" ): A : List[str] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] A : Tuple = dummy_past_residuals[:] A : Dict = scheduler.step_prk(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[Any] = scheduler.step_prk(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) A : Any = scheduler.step_plms(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : str = scheduler.step_plms(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple: for timesteps in [1_00, 10_00]: self.check_over_configs(num_train_timesteps=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]: for steps_offset in [0, 1]: self.check_over_configs(steps_offset=__lowerCamelCase ) A : Dict = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config(steps_offset=1 ) A : Optional[int] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=__lowerCamelCase , beta_end=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : 
Optional[int] ) -> Union[str, Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]: for t in [1, 5, 10]: self.check_over_forward(time_step=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int: for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ): self.check_over_forward(num_inference_steps=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any: # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 A : str = 27 for scheduler_class in self.scheduler_classes: A : Tuple = self.dummy_sample A : List[Any] = 0.1 * sample A : List[Any] = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : str ) -> int: with self.assertRaises(__lowerCamelCase ): A : Union[str, Any] = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict: A : Optional[Any] = self.full_loop() A : Tuple = torch.sum(torch.abs(__lowerCamelCase ) ) A : Optional[int] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 198.1318 ) < 1e-2 assert abs(result_mean.item() - 0.2580 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any: A : Any = self.full_loop(prediction_type="v_prediction" ) A : Union[str, Any] = torch.sum(torch.abs(__lowerCamelCase ) ) A : List[str] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 67.3986 ) < 1e-2 assert abs(result_mean.item() - 0.0878 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]: # We specify different beta, so that the first alpha is 0.99 A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 ) A : Dict = torch.sum(torch.abs(__lowerCamelCase ) ) A : Any = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 230.0399 ) < 1e-2 assert abs(result_mean.item() - 0.2995 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any: # We specify different beta, so that the first alpha is 0.99 A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 ) A : List[Any] = torch.sum(torch.abs(__lowerCamelCase ) ) A : Optional[Any] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 186.9482 ) < 1e-2 assert abs(result_mean.item() - 0.2434 ) < 1e-3
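The denoising loop that the scheduler tests above emulate, written out; the zero residual is a stand-in for a real model's noise prediction:

import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000, skip_prk_steps=True)
scheduler.set_timesteps(50)
sample = torch.randn(1, 4, 8, 8)
for t in scheduler.timesteps:
    residual = torch.zeros_like(sample)  # placeholder noise prediction
    sample = scheduler.step(residual, t, sample).prev_sample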
17
0
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        # Blip models do not use token_type_ids, so disable them on the tokenizer.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        # Forwarded to the underlying tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwarded to the underlying tokenizer's decode.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
from __future__ import annotations

from math import pi

# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
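# Illustrative usage (an editorial addition; the numbers are arbitrary sample
# inputs, not physical reference data). Exactly one argument is passed as 0,
# and the function solves for that quantity:
if __name__ == "__main__":
    print(casimir_force(force=0, area=4e-4, distance=1e-6))  # solves for force
    print(casimir_force(force=2.6e-7, area=0, distance=1e-6))  # solves for area
    print(casimir_force(force=2.6e-7, area=4e-4, distance=0))  # solves for distance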
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
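# Illustrative check (an editorial addition, not part of the original module):
# for non-negative integers, the 1-indexed position of the highest set bit
# coincides with int.bit_length().
if __name__ == "__main__":
    for n in (0, 1, 2, 255, 256):
        assert get_highest_set_bit_position(n) == n.bit_length()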
import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
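# Example invocation (illustrative: the script filename is assumed to follow
# the usual transformers conversion-script naming, the output path is a
# placeholder, and the checkpoint repo is the example cited in the
# --checkpoint-repo help text above):
#
#   python convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm-converted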
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric

from transformers import AutoFeatureExtractor, pipeline


def log_results(result: Dataset, args: Dict[str, str]):
    # Computes WER/CER and writes results (and optionally all outputs) to disk.
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
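# Example invocation (illustrative; the model and dataset identifiers below
# are placeholders, not recommendations):
#
#   python eval.py \
#       --model_id <your-model-id> \
#       --dataset mozilla-foundation/common_voice_8_0 \
#       --config en \
#       --split test \
#       --log_outputs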
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
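# A minimal sketch of the lazy-import mechanism used above (an illustration,
# not the real transformers._LazyModule, which also handles module specs,
# dummy objects for missing backends, and more):
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Find the submodule that exports `attr`, import it on first access,
        # and cache the resolved value on the module object.
        for submodule, exported in self._import_structure.items():
            if attr in exported:
                module = importlib.import_module(f".{submodule}", self.__name__)
                value = getattr(module, attr)
                setattr(self, attr, value)
                return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")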
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    # Base case: no items left to consider.
    if index == number_of_items:
        return 0

    # Option 1: skip the current item.
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)

    # Option 2: take the current item, if it fits.
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )

    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
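# Illustrative usage (an editorial addition): a small classic instance with
# capacity 50, whose optimum is 220, obtained by taking the items with
# weights 20 and 30.
if __name__ == "__main__":
    weights = [10, 20, 30]
    values = [60, 100, 120]
    print(knapsack(weights, values, len(weights), 50, 0))  # -> 220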
import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_tf


if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.eager import context
    from tensorflow.python.framework import ops

    from transformers import GradientAccumulator, create_optimizer


@require_tf
class OptimizationFTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])

        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()

        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
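# Minimal sketch of how GradientAccumulator is typically used in a custom
# training loop (illustrative; `model`, `loss_fn`, `dataset`, and
# `accumulation_steps` are assumptions, not part of this test file):
#
#   accumulator = GradientAccumulator()
#   optimizer, _ = create_optimizer(5e-5, num_train_steps=1000, num_warmup_steps=100)
#
#   for step, (x, y) in enumerate(dataset):
#       with tf.GradientTape() as tape:
#           loss = loss_fn(y, model(x))
#       accumulator(tape.gradient(loss, model.trainable_variables))
#       if (step + 1) % accumulation_steps == 0:
#           optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#           accumulator.reset()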
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # "use_memorry_efficient_attention" (sic) is kept as a pop key for
        # backward compatibility with configs that used the misspelled name.
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
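# Illustrative construction (an editorial addition; the rope_scaling override
# is a hypothetical example value, and all other fields fall back to the
# defaults above):
if __name__ == "__main__":
    config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
    print(config.rope_scaling)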
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
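# Consumer-side effect of the gating above (illustrative): `from transformers
# import AltCLIPModel` succeeds even when torch is not installed, because the
# symbol resolves lazily; with torch missing, the name is backed by a dummy
# placeholder that raises an informative ImportError only when it is used.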