Dataset schema (each row below lists its fields in this column order:
code, code_codestyle, style_context, style_context_codestyle, label):

    code                     string  (lengths 81 to 54k)
    code_codestyle           int64   (0 to 721)
    style_context            string  (lengths 91 to 41.9k)
    style_context_codestyle  int64   (0 to 699)
    label                    int64   (0 to 1)
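A minimal sketch of how rows with this schema could be loaded and inspected with the Hugging Face `datasets` library; the JSON Lines file name "rows.jsonl" is an assumption for illustration, since the dump's actual storage format is not specified here.

# Minimal sketch: load rows matching the schema above and inspect them.
# Assumption: the rows are stored locally as JSON Lines in "rows.jsonl".
from datasets import load_dataset

ds = load_dataset("json", data_files="rows.jsonl", split="train")

for row in ds.select(range(2)):
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
    print(row["code"][:80])  # first 80 characters of the flattened source sample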
import argparse

import torch

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
    parser.add_argument(
        "--original_config_file",
        default=None,
        type=str,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None`, the number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--scheduler_type",
        default="pndm",
        type=str,
        help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
    )
    parser.add_argument(
        "--pipeline_type",
        default=None,
        type=str,
        help=(
            "The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
            ". If `None`, the pipeline will be automatically inferred."
        ),
    )
    parser.add_argument(
        "--image_size",
        default=None,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--prediction_type",
        default=None,
        type=str,
        help=(
            "The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
            " Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    parser.add_argument(
        "--stable_unclip",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
    )
    parser.add_argument(
        "--stable_unclip_prior",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
    )
    parser.add_argument(
        "--clip_stats_path",
        type=str,
        help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
        required=False,
    )
    parser.add_argument(
        "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
    )
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--vae_path",
        type=str,
        default=None,
        required=False,
        help="Set to a path, hub id to an already converted vae to not convert it again.",
    )
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )

    if args.half:
        pipe.to(torch_dtype=torch.float16)

    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
17
import unittest
from typing import Tuple

import torch

from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch


@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels")
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
17
1
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
17
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional

import torch

from ..utils import add_start_docstrings, logging


logger = logging.get_logger(__name__)


STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
            IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.
"""


class StoppingCriteria(ABC):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
17
1
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
17
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(function, starting_point, variable="x", precision=10**-10, multiplicity=1):
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")

    # Find root of polynomial
    # Find fourth Root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 + 5j)}")

    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )

    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
    )

    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
17
1
from typing import TYPE_CHECKING

from ..utils import _LazyModule


_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}


if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
17
import json
from typing import Dict, List, Optional, Tuple, Union

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
17
1
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
17
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
17
1
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
17
import inspect
import unittest

from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
    from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image


class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
17
1
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
17
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
17
1
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation


def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def create_rename_keys(config):
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight"))
    rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias"))
    rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight"))
    rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"))
        if i > 0:
            rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias"))

        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))

    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-convnext-tiny",
        type=str,
        choices=[f"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]],
        help="Name of the ConvNext UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
17
from __future__ import annotations

import inspect
import unittest
from typing import List, Tuple

from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
17
1
import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False, False, False @dataclass class lowerCamelCase_ : '''simple docstring''' a__ = None a__ = True a__ = True a__ = None # Automatically constructed a__ = "dict" a__ = pa.struct({"bytes": pa.binary(), "path": pa.string()} ) a__ = field(default="Audio" ,init=_A ,repr=_A ) def __call__( self : Optional[int] ) -> Optional[int]: return self.pa_type def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Union[str, bytes, dict] ) -> dict: try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err if isinstance(__lowerCamelCase , __lowerCamelCase ): return {"bytes": None, "path": value} elif isinstance(__lowerCamelCase , __lowerCamelCase ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes A : Union[str, Any] = BytesIO() sf.write(__lowerCamelCase , value["array"] , value["sampling_rate"] , format="wav" ) return {"bytes": buffer.getvalue(), "path": None} elif value.get("path" ) is not None and os.path.isfile(value["path"] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith("pcm" ): # "PCM" only has raw audio bytes if value.get("sampling_rate" ) is None: # If you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" ) if value.get("bytes" ): # If we already have the PCM bytes, we don't need to read the file again (just use them!) A : Tuple = np.frombuffer(value["bytes"] , dtype=np.intaa ).astype(np.floataa ) / 3_27_67 else: A : Dict = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.floataa ) / 3_27_67 A : Optional[int] = BytesIO(bytes() ) sf.write(__lowerCamelCase , __lowerCamelCase , value["sampling_rate"] , format="wav" ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get("path" )} elif value.get("bytes" ) is not None or value.get("path" ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get("bytes" ), "path": value.get("path" )} else: raise ValueError( F"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" ) def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : dict , __lowerCamelCase : Optional[Dict[str, Union[str, bool, None]]] = None ) -> dict: if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." 
) A , A : Optional[int] = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None) if path is None and file is None: raise ValueError(F"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err A : List[str] = xsplitext(__lowerCamelCase )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, " "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, " "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " ) if file is None: A : int = token_per_repo_id or {} A : Tuple = path.split("::" )[-1] try: A : List[Any] = string_to_dict(__lowerCamelCase , config.HUB_DATASETS_URL )["repo_id"] A : Tuple = token_per_repo_id[repo_id] except (ValueError, KeyError): A : int = None with xopen(__lowerCamelCase , "rb" , use_auth_token=__lowerCamelCase ) as f: A , A : List[str] = sf.read(__lowerCamelCase ) else: A , A : Tuple = sf.read(__lowerCamelCase ) A : List[str] = array.T if self.mono: A : Optional[int] = librosa.to_mono(__lowerCamelCase ) if self.sampling_rate and self.sampling_rate != sampling_rate: A : Dict = librosa.resample(__lowerCamelCase , orig_sr=__lowerCamelCase , target_sr=self.sampling_rate ) A : str = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value if self.decode: raise ValueError("Cannot flatten a decoded Audio feature." 
) return { "bytes": Value("binary" ), "path": Value("string" ), } def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Union[pa.StringArray, pa.StructArray] ) -> pa.StructArray: if pa.types.is_string(storage.type ): A : int = pa.array([None] * len(__lowerCamelCase ) , type=pa.binary() ) A : Any = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): A : Any = pa.array([None] * len(__lowerCamelCase ) , type=pa.string() ) A : Union[str, Any] = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ): A : List[str] = pa.array([Audio().encode_example(__lowerCamelCase ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("bytes" ) >= 0: A : Any = storage.field("bytes" ) else: A : Dict = pa.array([None] * len(__lowerCamelCase ) , type=pa.binary() ) if storage.type.get_field_index("path" ) >= 0: A : Optional[Any] = storage.field("path" ) else: A : Union[str, Any] = pa.array([None] * len(__lowerCamelCase ) , type=pa.string() ) A : int = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() ) return array_cast(__lowerCamelCase , self.pa_type ) def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : pa.StructArray ) -> pa.StructArray: @no_op_if_value_is_null def path_to_bytes(__lowerCamelCase : str ): with xopen(__lowerCamelCase , "rb" ) as f: A : int = f.read() return bytes_ A : str = pa.array( [ (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) A : Optional[int] = pa.array( [os.path.basename(__lowerCamelCase ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , ) A : Tuple = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() ) return array_cast(__lowerCamelCase , self.pa_type )
17
import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = (PNDMScheduler,) a__ = (("num_inference_steps", 50),) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , **__lowerCamelCase : str ) -> Optional[Any]: A : Union[str, Any] = { "num_train_timesteps": 10_00, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**__lowerCamelCase ) return config def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : List[str]=0 , **__lowerCamelCase : Any ) -> Tuple: A : Dict = dict(self.forward_default_kwargs ) A : Dict = kwargs.pop("num_inference_steps" , __lowerCamelCase ) A : Union[str, Any] = self.dummy_sample A : List[Any] = 0.1 * sample A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A : Any = self.get_scheduler_config(**__lowerCamelCase ) A : int = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals A : Tuple = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCamelCase ) A : Dict = scheduler_class.from_pretrained(__lowerCamelCase ) new_scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals A : Tuple = dummy_past_residuals[:] A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : str = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A : int = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[str] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]: pass def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Optional[Any]=0 , **__lowerCamelCase : Tuple ) -> str: A : List[str] = dict(self.forward_default_kwargs ) A : Optional[int] = kwargs.pop("num_inference_steps" , __lowerCamelCase ) A : List[str] = self.dummy_sample A : Any = 0.1 * sample A : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A : Tuple = self.get_scheduler_config() A : Optional[int] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals (must be after setting timesteps) A : Optional[int] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCamelCase ) A : str = scheduler_class.from_pretrained(__lowerCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residual (must be after setting timesteps) A : Optional[Any] = dummy_past_residuals[:] A : Union[str, Any] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : Dict = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A : 
Union[str, Any] = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[Any] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : Tuple , **__lowerCamelCase : Any ) -> Union[str, Any]: A : Optional[Any] = self.scheduler_classes[0] A : List[Any] = self.get_scheduler_config(**__lowerCamelCase ) A : str = scheduler_class(**__lowerCamelCase ) A : List[str] = 10 A : Union[str, Any] = self.dummy_model() A : int = self.dummy_sample_deter scheduler.set_timesteps(__lowerCamelCase ) for i, t in enumerate(scheduler.prk_timesteps ): A : Optional[int] = model(__lowerCamelCase , __lowerCamelCase ) A : Optional[int] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): A : Tuple = model(__lowerCamelCase , __lowerCamelCase ) A : Tuple = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample return sample def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any: A : Union[str, Any] = dict(self.forward_default_kwargs ) A : Union[str, Any] = kwargs.pop("num_inference_steps" , __lowerCamelCase ) for scheduler_class in self.scheduler_classes: A : Dict = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) A : List[Any] = self.dummy_sample A : List[Any] = 0.1 * sample if num_inference_steps is not None and hasattr(__lowerCamelCase , "set_timesteps" ): scheduler.set_timesteps(__lowerCamelCase ) elif num_inference_steps is not None and not hasattr(__lowerCamelCase , "set_timesteps" ): A : List[str] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] A : Tuple = dummy_past_residuals[:] A : Dict = scheduler.step_prk(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[Any] = scheduler.step_prk(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) A : Any = scheduler.step_plms(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : str = scheduler.step_plms(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple: for timesteps in [1_00, 10_00]: self.check_over_configs(num_train_timesteps=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]: for steps_offset in [0, 1]: self.check_over_configs(steps_offset=__lowerCamelCase ) A : Dict = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config(steps_offset=1 ) A : Optional[int] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=__lowerCamelCase , beta_end=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : 
Optional[int] ) -> Union[str, Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]: for t in [1, 5, 10]: self.check_over_forward(time_step=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int: for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ): self.check_over_forward(num_inference_steps=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any: # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 A : str = 27 for scheduler_class in self.scheduler_classes: A : Tuple = self.dummy_sample A : List[Any] = 0.1 * sample A : List[Any] = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : str ) -> int: with self.assertRaises(__lowerCamelCase ): A : Union[str, Any] = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict: A : Optional[Any] = self.full_loop() A : Tuple = torch.sum(torch.abs(__lowerCamelCase ) ) A : Optional[int] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 198.1318 ) < 1e-2 assert abs(result_mean.item() - 0.2580 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any: A : Any = self.full_loop(prediction_type="v_prediction" ) A : Union[str, Any] = torch.sum(torch.abs(__lowerCamelCase ) ) A : List[str] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 67.3986 ) < 1e-2 assert abs(result_mean.item() - 0.0878 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]: # We specify different beta, so that the first alpha is 0.99 A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 ) A : Dict = torch.sum(torch.abs(__lowerCamelCase ) ) A : Any = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 230.0399 ) < 1e-2 assert abs(result_mean.item() - 0.2995 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any: # We specify different beta, so that the first alpha is 0.99 A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 ) A : List[Any] = torch.sum(torch.abs(__lowerCamelCase ) ) A : Optional[Any] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 186.9482 ) < 1e-2 assert abs(result_mean.item() - 0.2434 ) < 1e-3
17
1
def solution(limit: int = 100_0000) -> int:
    # Sum Euler's totient phi(k) for 2 <= k <= limit with a sieve:
    # start from phi[k] = k - 1 and, for every prime i, subtract
    # phi[j] // i from every multiple j of i.
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime: its phi value was never reduced
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
17
from __future__ import annotations

from math import pi

# Define the reduced Planck constant ℏ (h-bar) and the speed of light c
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    # Exactly one of the three quantities must be 0: that is the unknown to solve for.
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        # Casimir force between parallel plates: F = pi^2 * ℏ * c * A / (240 * d^4)
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
17
1
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    # Return plain floats when the roots are real.
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
17
import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : Optional[Any] = RobertaPreLayerNormConfig.from_pretrained( _lowerCamelCase , architectures=["RobertaPreLayerNormForMaskedLM"] ) # convert state_dict A : List[Any] = torch.load(hf_hub_download(repo_id=_lowerCamelCase , filename="pytorch_model.bin" ) ) A : Union[str, Any] = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwriting 'roberta' if tensor_key.startswith("roberta." ): A : int = "roberta_prelayernorm." + tensor_key[len("roberta." ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ): continue A : Any = tensor_value A : Optional[int] = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=_lowerCamelCase , config=_lowerCamelCase , state_dict=_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) # convert tokenizer A : Optional[Any] = AutoTokenizer.from_pretrained(_lowerCamelCase ) tokenizer.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint-repo""", default=None, type=str, required=True, help="""Path to the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) __SCREAMING_SNAKE_CASE = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
17
1
import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 __SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy-config.json""") class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Tuple: A : Optional[Any] = 0 def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any: self.assertIsNotNone(transformers.models.auto.__spec__ ) self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict: A : List[str] = AutoConfig.from_pretrained("bert-base-uncased" ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict: A : Optional[int] = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str: A : str = AutoConfig.for_model("roberta" ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict: with tempfile.TemporaryDirectory() as tmp_dir: # This model name contains bert and roberta, but roberta ends up being picked. 
A : Optional[int] = os.path.join(__lowerCamelCase , "fake-roberta" ) os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) with open(os.path.join(__lowerCamelCase , "config.json" ) , "w" ) as f: f.write(json.dumps({} ) ) A : int = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertEqual(type(__lowerCamelCase ) , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str: try: AutoConfig.register("custom" , __lowerCamelCase ) # Wrong model type will raise an error with self.assertRaises(__lowerCamelCase ): AutoConfig.register("model" , __lowerCamelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__lowerCamelCase ): AutoConfig.register("bert" , __lowerCamelCase ) # Now that the config is registered, it can be used as any other config with the auto-API A : Optional[Any] = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__lowerCamelCase ) A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: with self.assertRaisesRegex( __lowerCamelCase , "bert-base is not a local folder and is not a valid model identifier" ): A : str = AutoConfig.from_pretrained("bert-base" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Any: with self.assertRaisesRegex( __lowerCamelCase , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): A : Optional[int] = AutoConfig.from_pretrained(__lowerCamelCase , revision="aaaaaa" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str: with self.assertRaisesRegex( __lowerCamelCase , "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." , ): A : Optional[Any] = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__lowerCamelCase ): A : int = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ) # If remote code is disabled, we can't load this config. with self.assertRaises(__lowerCamelCase ): A : Union[str, Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__lowerCamelCase ) A : Union[str, Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__lowerCamelCase ) self.assertEqual(config.__class__.__name__ , "NewModelConfig" ) # Test config can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__lowerCamelCase ) A : List[Any] = AutoConfig.from_pretrained(__lowerCamelCase , trust_remote_code=__lowerCamelCase ) self.assertEqual(reloaded_config.__class__.__name__ , "NewModelConfig" ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]: class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = "new-model" try: AutoConfig.register("new-model" , __lowerCamelCase ) # If remote code is not set, the default is to use local A : Optional[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ) self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" ) # If remote code is disabled, we load the local one. 
A : str = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__lowerCamelCase ) self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" ) # If remote is enabled, we load from the Hub A : Any = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__lowerCamelCase ) self.assertEqual(config.__class__.__name__ , "NewModelConfig" ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
17
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __SCREAMING_SNAKE_CASE = { """configuration_instructblip""": [ """INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """InstructBlipConfig""", """InstructBlipQFormerConfig""", """InstructBlipVisionConfig""", ], """processing_instructblip""": ["""InstructBlipProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """InstructBlipQFormerModel""", """InstructBlipPreTrainedModel""", """InstructBlipForConditionalGeneration""", """InstructBlipVisionModel""", ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys __SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
17
1
from __future__ import annotations


def binary_search(v: list[int], l: int, r: int, key: int) -> int:  # noqa: E741
    # Find the leftmost index in v[l+1 .. r] whose value is >= key.
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # New smallest value: it starts a fresh candidate subsequence.
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far.
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the first tail element that is >= v[i].
            tail[binary_search(tail, -1, length - 1, v[i])] = v[i]
    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
17
import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Optional[int]: self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) ) for a, b in zip(__lowerCamelCase , __lowerCamelCase ): self.assertAlmostEqual(__lowerCamelCase , __lowerCamelCase , delta=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]: A : List[Any] = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(__lowerCamelCase ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: A : Union[str, Any] = None ops.enable_eager_execution_internal() A : Tuple = tf.config.list_physical_devices("CPU" ) if len(__lowerCamelCase ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) A : Dict = tf.config.list_logical_devices(device_type="CPU" ) A : List[str] = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): A : Optional[int] = GradientAccumulator() A : Tuple = tf.Variable([4.0, 3.0] ) A , A : List[Any] = create_optimizer(5e-5 , 10 , 5 ) A : List[str] = tf.Variable([0.0, 0.0] , trainable=__lowerCamelCase ) def accumulate_on_replica(__lowerCamelCase : Tuple ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(__lowerCamelCase : Any , __lowerCamelCase : Optional[int] ): with strategy.scope(): A : int = strategy.experimental_local_results(__lowerCamelCase ) local_variables[0].assign(__lowerCamelCase ) local_variables[1].assign(__lowerCamelCase ) strategy.run(__lowerCamelCase , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(__lowerCamelCase ) def _check_local_values(__lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ): A : Optional[int] = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , __lowerCamelCase , tol=1e-2 ) self.assertListAlmostEqual(values[1].value() , __lowerCamelCase , tol=1e-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
17
1
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' a__ = JukeboxTokenizer a__ = { "artist": "Zac Brown Band", "genres": "Country", "lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ", } @require_torch def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict: import torch A : Optional[Any] = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" ) A : Tuple = tokenizer(**self.metas )["input_ids"] # fmt: off A : List[Any] = [ torch.tensor([[ 0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 
76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def SCREAMING_SNAKE_CASE__ ( self : int ) -> Tuple: import torch A : int = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" ) A : str = tokenizer(**self.metas )["input_ids"] # fmt: off A : List[str] = [ torch.tensor([[ 0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
17
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __SCREAMING_SNAKE_CASE = { """configuration_altclip""": [ """ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AltCLIPConfig""", """AltCLIPTextConfig""", """AltCLIPVisionConfig""", ], """processing_altclip""": ["""AltCLIPProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """AltCLIPPreTrainedModel""", """AltCLIPModel""", """AltCLIPTextModel""", """AltCLIPVisionModel""", ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys __SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
17
1
import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class lowerCamelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : Any ) -> str: super().__init__() A : str = nn.Linear(3 , 4 ) A : Optional[int] = nn.BatchNormad(4 ) A : int = nn.Linear(4 , 5 ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : List[Any] ) -> Union[str, Any]: return self.lineara(self.batchnorm(self.lineara(__lowerCamelCase ) ) ) class lowerCamelCase_ ( _A ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : List[str] , *__lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ) -> List[Any]: return (args[0] + 1,) + args[1:], kwargs class lowerCamelCase_ ( _A ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Tuple ) -> Optional[Any]: return output + 1 class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Tuple: A : Optional[int] = ModelForTest() A : Union[str, Any] = ModelHook() add_hook_to_module(__lowerCamelCase , __lowerCamelCase ) self.assertEqual(test_model._hf_hook , __lowerCamelCase ) self.assertTrue(hasattr(__lowerCamelCase , "_old_forward" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , "forward" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] ) remove_hook_from_module(__lowerCamelCase ) self.assertFalse(hasattr(__lowerCamelCase , "_hf_hook" ) ) self.assertFalse(hasattr(__lowerCamelCase , "_old_forward" ) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]: A : Any = ModelForTest() A : int = ModelHook() add_hook_to_module(__lowerCamelCase , __lowerCamelCase ) add_hook_to_module(__lowerCamelCase , __lowerCamelCase , append=__lowerCamelCase ) self.assertEqual(isinstance(test_model._hf_hook , __lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(__lowerCamelCase , "_old_forward" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , "forward" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] ) remove_hook_from_module(__lowerCamelCase ) self.assertFalse(hasattr(__lowerCamelCase , "_hf_hook" ) ) self.assertFalse(hasattr(__lowerCamelCase , "_old_forward" ) ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[str]: A : Optional[int] = ModelForTest() A : List[str] = torch.randn(2 , 3 ) A : int = test_model(x + 1 ) A : int = test_model(x + 2 ) A : Optional[int] = PreForwardHook() add_hook_to_module(__lowerCamelCase , __lowerCamelCase ) A : Any = test_model(__lowerCamelCase ) self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain A : int = PreForwardHook() add_hook_to_module(__lowerCamelCase , __lowerCamelCase ) A : str = test_model(__lowerCamelCase ) self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks A : List[str] = SequentialHook(PreForwardHook() , PreForwardHook() ) 
add_hook_to_module(__lowerCamelCase , __lowerCamelCase ) A : Union[str, Any] = test_model(__lowerCamelCase ) assert torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-5 ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Any: A : List[str] = ModelForTest() A : Union[str, Any] = torch.randn(2 , 3 ) A : Optional[int] = test_model(__lowerCamelCase ) A : Dict = PostForwardHook() add_hook_to_module(__lowerCamelCase , __lowerCamelCase ) A : Dict = test_model(__lowerCamelCase ) self.assertTrue(torch.allclose(__lowerCamelCase , output + 1 , atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain A : Union[str, Any] = PostForwardHook() add_hook_to_module(__lowerCamelCase , __lowerCamelCase ) A : Union[str, Any] = test_model(__lowerCamelCase ) self.assertTrue(torch.allclose(__lowerCamelCase , output + 1 , atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks A : Optional[Any] = SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(__lowerCamelCase , __lowerCamelCase ) A : Dict = test_model(__lowerCamelCase ) assert torch.allclose(__lowerCamelCase , output + 2 , atol=1e-5 ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int: A : Tuple = ModelForTest() A : List[str] = torch.randn(2 , 3 ) A : int = test_model(__lowerCamelCase ) A : Any = PostForwardHook() add_hook_to_module(__lowerCamelCase , __lowerCamelCase ) A : int = test_model(__lowerCamelCase ) self.assertTrue(torch.allclose(__lowerCamelCase , output + 1 ) ) self.assertTrue(outputa.requires_grad ) A : str = True A : List[str] = test_model(__lowerCamelCase ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]: A : str = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device A : Optional[Any] = torch.randn(2 , 3 ) A : int = model(__lowerCamelCase ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. 
add_hook_to_module(__lowerCamelCase , AlignDevicesHook(io_same_device=__lowerCamelCase ) ) A : Optional[Any] = torch.randn(2 , 3 ).to(0 ) A : Any = model(__lowerCamelCase ) self.assertEqual(output.device , torch.device(0 ) ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: A : str = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # This will move each submodule on different devices A : Union[str, Any] = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True} add_hook_to_module(model.lineara , AlignDevicesHook(**__lowerCamelCase ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**__lowerCamelCase ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**__lowerCamelCase ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) ) self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) # Buffers are not included in the offload by default, so are on the execution device A : Any = torch.device(hook_kwargs["execution_device"] ) self.assertEqual(model.batchnorm.running_mean.device , __lowerCamelCase ) A : Any = torch.randn(2 , 3 ) A : List[Any] = model(__lowerCamelCase ) self.assertEqual(output.device , __lowerCamelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # Now test with buffers included in the offload A : List[Any] = { "execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True, "offload_buffers": True, } add_hook_to_module(model.lineara , AlignDevicesHook(**__lowerCamelCase ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**__lowerCamelCase ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**__lowerCamelCase ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) ) self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) ) A : int = torch.randn(2 , 3 ) A : List[str] = model(__lowerCamelCase ) self.assertEqual(output.device , __lowerCamelCase ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]: A : List[str] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # This will move each submodule on different devices A : Tuple = 0 if torch.cuda.is_available() else "cpu" attach_align_device_hook(__lowerCamelCase , execution_device=__lowerCamelCase , offload=__lowerCamelCase ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) ) self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) # Buffers are not included in the offload by default, so are on the execution device A : Optional[int] = torch.device(__lowerCamelCase ) self.assertEqual(model.batchnorm.running_mean.device , __lowerCamelCase ) A : str = torch.randn(2 , 3 ) A : int = model(__lowerCamelCase ) self.assertEqual(output.device , __lowerCamelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__lowerCamelCase ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # Now test with buffers included in the offload attach_align_device_hook(__lowerCamelCase , execution_device=__lowerCamelCase , offload=__lowerCamelCase , offload_buffers=__lowerCamelCase ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) ) self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) ) A : Dict = torch.randn(2 , 3 ) A : Optional[int] = model(__lowerCamelCase ) self.assertEqual(output.device , __lowerCamelCase ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(__lowerCamelCase ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[Any]: A : Union[str, Any] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # This will move each submodule on different devices A : Any = 0 if torch.cuda.is_available() else "cpu" attach_align_device_hook( __lowerCamelCase , execution_device=__lowerCamelCase , offload=__lowerCamelCase , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) ) self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) # Buffers are not included in the offload by default, so are on the execution device A : Dict = torch.device(__lowerCamelCase ) self.assertEqual(model.batchnorm.running_mean.device , __lowerCamelCase ) A : List[Any] = torch.randn(2 , 3 ) A : Optional[int] = model(__lowerCamelCase ) self.assertEqual(output.device , __lowerCamelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__lowerCamelCase ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # Now test with buffers included in the offload attach_align_device_hook( __lowerCamelCase , execution_device=__lowerCamelCase , offload=__lowerCamelCase , weights_map=model.state_dict() , offload_buffers=__lowerCamelCase , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) ) self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) ) A : Union[str, Any] = torch.randn(2 , 3 ) A : Any = model(__lowerCamelCase ) self.assertEqual(output.device , __lowerCamelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__lowerCamelCase ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
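The assertions above track accelerate's hook API end to end; as a minimal standalone sketch (the hook class name here is made up), a ModelHook can rewrite a module's output the way the PostForwardHook in these tests does:

# A minimal sketch, not part of the test file above.
import torch
from torch import nn
from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module

class AddOneHook(ModelHook):
    def post_forward(self, module, output):
        # Called after every forward; whatever it returns replaces the output.
        return output + 1

module = nn.Linear(3, 4)
x = torch.randn(2, 3)
add_hook_to_module(module, AddOneHook())   # wraps module.forward in place
hooked = module(x)
remove_hook_from_module(module)            # restores the original forward
assert torch.allclose(hooked, module(x) + 1, atol=1e-5)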
import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging __SCREAMING_SNAKE_CASE = ( """https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py""" ) __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name def UpperCAmelCase ( ): A : Union[str, Any] = "https://pypi.org/pypi/diffusers/json" A : List[Any] = json.loads(request.urlopen(_lowerCamelCase ).read() )["releases"].keys() return sorted(_lowerCamelCase , key=lambda _lowerCamelCase : version.Version(_lowerCamelCase ) ) def UpperCAmelCase ( ): # This function has already been executed if HF_MODULES_CACHE already is in the Python path. if HF_MODULES_CACHE in sys.path: return sys.path.append(_lowerCamelCase ) os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) A : List[Any] = Path(_lowerCamelCase ) / "__init__.py" if not init_path.exists(): init_path.touch() def UpperCAmelCase ( _lowerCamelCase ): init_hf_modules() A : Tuple = Path(_lowerCamelCase ) / name # If the parent module does not exist yet, recursively create it. if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) A : Optional[int] = dynamic_module_path / "__init__.py" if not init_path.exists(): init_path.touch() def UpperCAmelCase ( _lowerCamelCase ): with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f: A : Union[str, Any] = f.read() # Imports of the form `import .xxx` A : Union[str, Any] = re.findall("^\s*import\s+\.(\S+)\s*$" , _lowerCamelCase , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import" , _lowerCamelCase , flags=re.MULTILINE ) # Unique-ify return list(set(_lowerCamelCase ) ) def UpperCAmelCase ( _lowerCamelCase ): A : Optional[int] = False A : Tuple = [module_file] A : Optional[int] = [] # Let's recurse through all relative imports while not no_change: A : Optional[Any] = [] for f in files_to_check: new_imports.extend(get_relative_imports(_lowerCamelCase ) ) A : Optional[Any] = Path(_lowerCamelCase ).parent A : List[str] = [str(module_path / m ) for m in new_imports] A : Optional[Any] = [f for f in new_import_files if f not in all_relative_imports] A : Union[str, Any] = [f"""{f}.py""" for f in new_import_files] A : Tuple = len(_lowerCamelCase ) == 0 all_relative_imports.extend(_lowerCamelCase ) return all_relative_imports def UpperCAmelCase ( _lowerCamelCase ): with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f: A : Dict = f.read() # Imports of the form `import xxx` A : List[str] = re.findall("^\s*import\s+(\S+)\s*$" , _lowerCamelCase , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall("^\s*from\s+(\S+)\s+import" , _lowerCamelCase , flags=re.MULTILINE ) # Only keep the top-level module A : Optional[int] = [imp.split("." )[0] for imp in imports if not imp.startswith("." 
)] # Unique-ify and test we got them all A : Any = list(set(_lowerCamelCase ) ) A : Tuple = [] for imp in imports: try: importlib.import_module(_lowerCamelCase ) except ImportError: missing_packages.append(_lowerCamelCase ) if len(_lowerCamelCase ) > 0: raise ImportError( "This modeling file requires the following packages that were not found in your environment: " f"""{", ".join(_lowerCamelCase )}. Run `pip install {" ".join(_lowerCamelCase )}`""" ) return get_relative_imports(_lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : int = module_path.replace(os.path.sep , "." ) A : Optional[Any] = importlib.import_module(_lowerCamelCase ) if class_name is None: return find_pipeline_class(_lowerCamelCase ) return getattr(_lowerCamelCase , _lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase ): from ..pipelines import DiffusionPipeline A : int = dict(inspect.getmembers(_lowerCamelCase , inspect.isclass ) ) A : Union[str, Any] = None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , _lowerCamelCase ) and cls.__module__.split("." )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:""" f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in""" f""" {loaded_module}.""" ) A : Any = cls return pipeline_class def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , ): A : List[Any] = str(_lowerCamelCase ) A : Any = os.path.join(_lowerCamelCase , _lowerCamelCase ) if os.path.isfile(_lowerCamelCase ): A : Union[str, Any] = module_file_or_url A : Any = "local" elif pretrained_model_name_or_path.count("/" ) == 0: A : Optional[Any] = get_diffusers_versions() # cut ".dev0" A : Union[str, Any] = "v" + ".".join(__version__.split("." )[:3] ) # retrieve github version that matches if revision is None: A : List[Any] = latest_version if latest_version[1:] in available_versions else "main" logger.info(f"""Defaulting to latest_version: {revision}.""" ) elif revision in available_versions: A : Optional[Any] = f"""v{revision}""" elif revision == "main": A : Dict = revision else: raise ValueError( f"""`custom_revision`: {revision} does not exist. 
Please make sure to choose one of""" f""" {", ".join(available_versions + ["main"] )}.""" ) # community pipeline on GitHub A : Dict = COMMUNITY_PIPELINES_URL.format(revision=_lowerCamelCase , pipeline=_lowerCamelCase ) try: A : Optional[int] = cached_download( _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , ) A : Optional[Any] = "git" A : Any = pretrained_model_name_or_path + ".py" except EnvironmentError: logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" ) raise else: try: # Load from URL or cache if already cached A : Any = hf_hub_download( _lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , ) A : Optional[Any] = os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) ) except EnvironmentError: logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" ) raise # Check we have all the requirements in our environment A : List[str] = check_imports(_lowerCamelCase ) # Now we move the module inside our cached dynamic modules. A : List[str] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(_lowerCamelCase ) A : Optional[int] = Path(_lowerCamelCase ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(_lowerCamelCase , submodule_path / module_file ) for module_needed in modules_needed: A : int = f"""{module_needed}.py""" shutil.copy(os.path.join(_lowerCamelCase , _lowerCamelCase ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(_lowerCamelCase , _lowerCamelCase ): A : Optional[Any] = use_auth_token elif use_auth_token is True: A : Dict = HfFolder.get_token() else: A : Tuple = None A : List[str] = model_info(_lowerCamelCase , revision=_lowerCamelCase , token=_lowerCamelCase ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. 
A : str = submodule_path / commit_hash A : List[str] = full_submodule + os.path.sep + commit_hash create_dynamic_module(_lowerCamelCase ) if not (submodule_path / module_file).exists(): shutil.copy(_lowerCamelCase , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( _lowerCamelCase , f"""{module_needed}.py""" , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , ) return os.path.join(_lowerCamelCase , _lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , **_lowerCamelCase , ): A : int = get_cached_module_file( _lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , ) return get_class_in_module(_lowerCamelCase , final_module.replace(".py" , "" ) )
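Downstream, this loader is usually reached through `DiffusionPipeline.from_pretrained` with the `custom_pipeline` argument; a sketch, assuming the `lpw_stable_diffusion` community file and the checkpoint below are available:

# Sketch only: fetches examples/community/lpw_stable_diffusion.py for the
# matching diffusers release via the machinery above, then loads its class.
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="lpw_stable_diffusion",
)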
def knapsack(weights, values, number_of_items, max_weight, index):
    """0/1 knapsack by plain recursion over the items, starting at `index`."""
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    # Option 1: skip the current item
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take the current item if it still fits
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
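A hypothetical quick call (not part of the source file) showing the recursion from the top of the item list:

# Items of weight [3, 4, 5] and value [4, 5, 6] with capacity 8: taking the
# items of weight 3 and 5 (values 4 + 6) is optimal.
weights, values = [3, 4, 5], [4, 5, 6]
assert knapsack(weights, values, len(weights), 8, 0) == 10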
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed __SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""): from run_translation import main # noqa set_seed(42) __SCREAMING_SNAKE_CASE = """sshleifer/student_marian_en_ro_6_1""" __SCREAMING_SNAKE_CASE = """sshleifer/tiny-mbart""" @require_torch class lowerCamelCase_ ( _A ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : str=None , __lowerCamelCase : List[str]=True , __lowerCamelCase : str=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Optional[int]=True , ) -> Dict: A : str = self.run_trainer( eval_steps=1 , max_len=12 , model_name=__lowerCamelCase , num_train_epochs=1 , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , predict_with_generate=__lowerCamelCase , do_train=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , ) A : Dict = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history if not do_eval: return A : List[Any] = [log for log in logs if "eval_loss" in log.keys()] A : Any = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats A : List[str] = eval_metrics[-1] assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase ) assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: self.run_seqaseq_quick() @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: self.run_seqaseq_quick(distributed=__lowerCamelCase ) @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str: self.run_seqaseq_quick(distributed=__lowerCamelCase ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]: self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple" ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> str: self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple --fp16" ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=__lowerCamelCase ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: self.run_seqaseq_quick( distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , 
predict_with_generate=__lowerCamelCase ) @require_apex @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict: # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" ) @parameterized.expand(["base", "low", "high", "mixed"] ) @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : List[str] ) -> Tuple: # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout A : Dict = { # test with the default log_level - should be info and thus log info once "base": {"extra_args_str": "", "n_matches": 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1}, # test with high log_level and log_level_replica - should be quiet on all processes "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0}, } A : List[str] = experiments[experiment_id] A : Union[str, Any] = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False} A : Union[str, Any] = "Running training" with CaptureStderr() as cl: self.run_seqaseq_quick(**__lowerCamelCase , extra_args_str=data["extra_args_str"] ) A : Dict = len(re.findall(__lowerCamelCase , cl.err ) ) self.assertEqual(__lowerCamelCase , data["n_matches"] ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: A : int = self.run_trainer( eval_steps=2 , max_len=1_28 , model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=10 , distributed=__lowerCamelCase , ) # Check metrics A : str = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history A : Dict = [log for log in logs if "eval_loss" in log.keys()] A : Dict = eval_metrics[0] A : int = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase ) # test if do_predict saves generations and metrics A : Optional[Any] = os.listdir(__lowerCamelCase ) A : Any = {os.path.basename(__lowerCamelCase ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]: from transformers.training_args import OptimizerNames def train_and_return_metrics(__lowerCamelCase : str ) -> Tuple[int, float]: A : Optional[int] = "--skip_memory_metrics 0" A : str = self.run_trainer( max_len=1_28 , 
model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=1 , optim=__lowerCamelCase , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , n_gpus_to_use=1 , ) # Check metrics A : Union[str, Any] = TrainerState.load_from_json(Path(__lowerCamelCase , "trainer_state.json" ) ).log_history A : str = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 ) A : List[Any] = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 ) A : int = logs[0]["train_loss"] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss A , A , A : int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) A , A , A : Optional[int] = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) A : Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb A : Dict = gpu_peak_mem_orig + gpu_alloc_mem_orig A : Dict = gpu_peak_mem_bnb + gpu_alloc_mem_bnb A : int = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings A : Tuple = 1_20 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( __lowerCamelCase , __lowerCamelCase , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got" F""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and""" F""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , ) self.assertGreater( __lowerCamelCase , __lowerCamelCase , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got" F""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and""" F""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , ) self.assertEqual( __lowerCamelCase , __lowerCamelCase , F"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" ) def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : float = 3e-3 , __lowerCamelCase : str = "adafactor" , __lowerCamelCase : bool = False , __lowerCamelCase : str = None , __lowerCamelCase : int = 0 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : int = None , ) -> List[str]: A : Optional[int] = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro" A : Optional[int] = self.get_auto_remove_tmp_dir() A : int = F""" --model_name_or_path {model_name} --train_file {data_dir}/train.json 
--validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(__lowerCamelCase )} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(__lowerCamelCase )} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX """.split() A : Optional[Any] = F""" --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(__lowerCamelCase )} """.split() A : Optional[Any] = "\n --do_predict\n ".split() A : Optional[int] = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F"""--optim {optim}""".split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: A : Dict = get_gpu_count() A : Any = get_torch_dist_unique_port() A : Optional[Any] = F""" -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py """.split() A : Any = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__lowerCamelCase , env=self.get_env() ) else: A : List[Any] = ["run_translation.py"] + args with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ): main() return output_dir
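The ~150MB expectation in the bitsandbytes test's comment is straightforward arithmetic; a throwaway check:

# 25M quantized params; Adam keeps ~8 bytes of state per param vs ~2 for bnb.
quantized_params = 25_000_000
saving_mb = quantized_params * (8 - 2) / 2**20
print(f"expected optimizer-state saving: {saving_mb:.0f} MB")  # ~143 MB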
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : str , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]=7 , __lowerCamelCase : Any=3 , __lowerCamelCase : List[Any]=18 , __lowerCamelCase : str=30 , __lowerCamelCase : Any=4_00 , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Tuple=True , ) -> int: A : int = size if size is not None else {"height": 18, "width": 18} A : List[Any] = parent A : int = batch_size A : Tuple = num_channels A : Optional[Any] = image_size A : Tuple = min_resolution A : Tuple = max_resolution A : Any = do_resize A : Dict = size A : str = apply_ocr def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class lowerCamelCase_ ( _A ,unittest.TestCase ): '''simple docstring''' a__ = LayoutLMvaImageProcessor if is_pytesseract_available() else None def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: A : Tuple = LayoutLMvaImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[Any]: return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> str: A : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) ) self.assertTrue(hasattr(__lowerCamelCase , "size" ) ) self.assertTrue(hasattr(__lowerCamelCase , "apply_ocr" ) ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> int: A : Dict = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 18, "width": 18} ) A : int = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"height": 42, "width": 42} ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]: pass def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]: # Initialize image_processing A : int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , Image.Image ) # Test not batched input A : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) self.assertIsInstance(encoding.words , __lowerCamelCase ) self.assertIsInstance(encoding.boxes , __lowerCamelCase ) # Test batched A : Tuple = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> 
Union[str, Any]: # Initialize image_processing A : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , np.ndarray ) # Test not batched input A : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched A : List[str] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: # Initialize image_processing A : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , torch.Tensor ) # Test not batched input A : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched A : Tuple = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Dict: # with apply_OCR = True A : List[Any] = LayoutLMvaImageProcessor() from datasets import load_dataset A : Optional[int] = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" ) A : int = Image.open(ds[0]["file"] ).convert("RGB" ) A : str = image_processing(__lowerCamelCase , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 A : Dict = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", 
"Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231 A : Dict = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], 
[3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , __lowerCamelCase ) self.assertListEqual(encoding.boxes , __lowerCamelCase ) # with apply_OCR = False A : Tuple = LayoutLMvaImageProcessor(apply_ocr=__lowerCamelCase ) A : Optional[int] = image_processing(__lowerCamelCase , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
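A minimal sketch of the OCR round-trip the last test exercises, using the class name as spelled in this file (the image path is hypothetical and Tesseract must be installed):

from PIL import Image
from transformers import LayoutLMvaImageProcessor

image_processor = LayoutLMvaImageProcessor()        # apply_ocr=True by default
image = Image.open("document.png").convert("RGB")   # hypothetical input file
encoding = image_processor(image, return_tensors="pt")
print(encoding.pixel_values.shape)                  # torch.Size([1, 3, 224, 224])
print(len(encoding.words[0]), len(encoding.boxes[0]))  # one box per word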
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Maximum sum over the (not necessarily contiguous) subsequences of nums."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # Keep the running best: add num if it helps, or restart from num alone
        ans = max(ans, ans + num, num)
    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
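Two hypothetical checks of the function above: positive numbers are all worth taking, and an all-negative input falls back to its largest element.

assert max_subsequence_sum([1, 2, 3, 4, -2]) == 10
assert max_subsequence_sum([-2, -3, -1, -4, -6]) == -1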
import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Optional[int]: self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) ) for a, b in zip(__lowerCamelCase , __lowerCamelCase ): self.assertAlmostEqual(__lowerCamelCase , __lowerCamelCase , delta=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]: A : List[Any] = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(__lowerCamelCase ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: A : Union[str, Any] = None ops.enable_eager_execution_internal() A : Tuple = tf.config.list_physical_devices("CPU" ) if len(__lowerCamelCase ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) A : Dict = tf.config.list_logical_devices(device_type="CPU" ) A : List[str] = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): A : Optional[int] = GradientAccumulator() A : Tuple = tf.Variable([4.0, 3.0] ) A , A : List[Any] = create_optimizer(5e-5 , 10 , 5 ) A : List[str] = tf.Variable([0.0, 0.0] , trainable=__lowerCamelCase ) def accumulate_on_replica(__lowerCamelCase : Tuple ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(__lowerCamelCase : Any , __lowerCamelCase : Optional[int] ): with strategy.scope(): A : int = strategy.experimental_local_results(__lowerCamelCase ) local_variables[0].assign(__lowerCamelCase ) local_variables[1].assign(__lowerCamelCase ) strategy.run(__lowerCamelCase , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(__lowerCamelCase ) def _check_local_values(__lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ): A : Optional[int] = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , __lowerCamelCase , tol=1e-2 ) self.assertListAlmostEqual(values[1].value() , __lowerCamelCase , tol=1e-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
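A single-device sketch of the accumulate-then-apply pattern these tests exercise (the gradient values are arbitrary):

import tensorflow as tf
from transformers import GradientAccumulator, create_optimizer

accumulator = GradientAccumulator()
variable = tf.Variable([4.0, 3.0])
optimizer, _ = create_optimizer(5e-5, 10, 5)
for grads in ([tf.constant([1.0, 2.0])], [tf.constant([3.0, 4.0])]):
    accumulator(grads)                # gradients are summed across the calls
optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))
accumulator.reset()                   # step and stored gradients back to zero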
from math import sqrt


def solution(limit: int = 1_000_000) -> int:
    """Return the smallest M such that more than `limit` cuboids with integer
    dimensions up to M have an integer shortest surface path."""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                # Count the (a, b) splits of sum_shortest_sides with
                # 1 <= a <= b <= max_cuboid_size
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
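The loop leans on the unfolding identity for the shortest surface path of an (a, b, c) cuboid with c its longest side: path = sqrt((a + b)^2 + c^2). A quick sanity check on the classic 3 x 5 x 6 example:

from math import sqrt

a, b, c = 3, 5, 6
assert sqrt((a + b) ** 2 + c ** 2) == 10.0  # integer shortest path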
import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path __SCREAMING_SNAKE_CASE = [ {"""dataset""": """wikipedia""", """config_name""": """20220301.de"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.en"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.it"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""}, {"""dataset""": """snli""", """config_name""": """plain_text"""}, {"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""}, {"""dataset""": """wiki40b""", """config_name""": """en"""}, {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""}, {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""}, {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""}, {"""dataset""": """natural_questions""", """config_name""": """default"""}, ] def UpperCAmelCase ( _lowerCamelCase=True ): if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_A ) ) class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = None a__ = None def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] ) -> List[str]: with TemporaryDirectory() as tmp_dir: A : int = dataset_module_factory(__lowerCamelCase , cache_dir=__lowerCamelCase ) A : Optional[int] = import_main_class(dataset_module.module_path , dataset=__lowerCamelCase ) A : DatasetBuilder = builder_cls( cache_dir=__lowerCamelCase , config_name=__lowerCamelCase , hash=dataset_module.hash , ) A : List[Any] = "/".join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=__lowerCamelCase ).replace(os.sep , "/" ), config.DATASET_INFO_FILENAME, ] ) A : Dict = cached_path(__lowerCamelCase , cache_dir=__lowerCamelCase ) self.assertTrue(os.path.exists(__lowerCamelCase ) ) @pytest.mark.integration def UpperCAmelCase ( _lowerCamelCase ): A : Tuple = tmp_path_factory.mktemp("test_hf_gcp" ) / "test_wikipedia_simple" A : Union[str, Any] = dataset_module_factory("wikipedia" , cache_dir=_lowerCamelCase ) A : Union[str, Any] = import_main_class(dataset_module.module_path ) A : DatasetBuilder = builder_cls( cache_dir=_lowerCamelCase , config_name="20220301.frr" , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam A : Dict = None builder_instance.download_and_prepare() A : List[Any] = builder_instance.as_dataset() assert ds @pytest.mark.integration def UpperCAmelCase ( _lowerCamelCase ): A : Tuple = dataset_module_factory("wikipedia" , cache_dir=_lowerCamelCase ) A : List[Any] = import_main_class(dataset_module.module_path , 
dataset=_lowerCamelCase ) A : DatasetBuilder = builder_cls( cache_dir=_lowerCamelCase , config_name="20220301.frr" , hash=dataset_module.hash , ) A : Union[str, Any] = builder_instance.as_streaming_dataset() assert ds assert isinstance(_lowerCamelCase , _lowerCamelCase ) assert "train" in ds assert isinstance(ds["train"] , _lowerCamelCase ) assert next(iter(ds["train"] ) )
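Outside the test harness, the streaming path reduces to one call; a sketch, assuming the preprocessed config is reachable from the Hub:

from datasets import load_dataset

ds = load_dataset("wikipedia", "20220301.frr", streaming=True)
print(next(iter(ds["train"])))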
import os

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for each perimeter up to max_perimeter, the right triangles
    with integer sides that have that perimeter."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(n: int = 1000) -> int:
    """Return the perimeter <= n with the largest number of solutions."""
    triplets = pythagorean_triple(n)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
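As a hypothetical spot check, perimeter 120 admits exactly three such triangles: (20, 48, 52), (24, 45, 51) and (30, 40, 50).

assert pythagorean_triple(120)[120] == 3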
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class lowerCamelCase_ : '''simple docstring''' def __init__( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Tuple=13 , __lowerCamelCase : List[str]=30 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : Any=3 , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : int=32 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Any=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=2 , ) -> str: A : List[Any] = parent A : Optional[int] = batch_size A : Any = image_size A : Optional[Any] = patch_size A : Optional[Any] = num_channels A : Tuple = is_training A : Optional[Any] = use_labels A : Union[str, Any] = hidden_size A : Tuple = num_hidden_layers A : Union[str, Any] = num_attention_heads A : Union[str, Any] = intermediate_size A : Any = hidden_act A : Tuple = hidden_dropout_prob A : Dict = attention_probs_dropout_prob A : Any = type_sequence_label_size A : Tuple = initializer_range A : List[Any] = scope A : Optional[int] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) A : List[str] = (image_size // patch_size) ** 2 A : List[str] = num_patches + 2 def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int: A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : List[Any] = None if self.use_labels: A : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Dict = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str] ) -> int: A : Optional[int] 
= DeiTModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : List[Any] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Any ) -> Any: A : List[Any] = DeiTForMaskedImageModeling(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A : List[str] = 1 A : Optional[int] = DeiTForMaskedImageModeling(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : int ) -> Dict: A : str = self.type_sequence_label_size A : List[str] = DeiTForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A : Any = 1 A : str = DeiTForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: A : Dict = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ) : Tuple = config_and_inputs A : int = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) a__ = ( { "feature-extraction": DeiTModel, "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]: A : str = DeiTModelTester(self ) A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict: self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int: pass def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: A , A : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Dict = model_class(__lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]: A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Union[str, Any] 
= model_class(__lowerCamelCase ) A : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Any = [*signature.parameters.keys()] A : Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any: A : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any]=False ) -> str: A : Union[str, Any] = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: if not self.model_tester.is_training: return A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() A : Dict = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(__lowerCamelCase ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue A : Union[str, Any] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Dict = model(**__lowerCamelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return A : Tuple = False A : Any = True for model_class in self.all_model_classes: if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue A : List[str] = model_class(__lowerCamelCase ) model.gradient_checkpointing_enable() model.to(__lowerCamelCase ) model.train() A : Dict = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Tuple = model(**__lowerCamelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() A : int = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(__lowerCamelCase ), *get_values(__lowerCamelCase ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ): A : Tuple = problem_type["title"] A : Optional[Any] = 
problem_type["num_labels"] A : List[str] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if problem_type["num_labels"] > 1: A : List[str] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) A : int = inputs["labels"].to(problem_type["dtype"] ) # This tests that we do not trigger the warning from PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom that something is wrong with the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=__lowerCamelCase ) as warning_list: A : Optional[Any] = model(**__lowerCamelCase ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Optional[Any] = DeiTModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: A : Optional[int] = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to( __lowerCamelCase ) A : List[Any] = self.default_image_processor A : List[Any] = prepare_img() A : Optional[Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): A : List[str] = model(**__lowerCamelCase ) # verify the logits A : str = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : List[str] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: A : str = DeiTModel.from_pretrained( "facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" ) A : Dict = self.default_image_processor A : Optional[int] = prepare_img() A : Union[str, Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ) A : Union[str, Any] = inputs.pixel_values.to(__lowerCamelCase ) # forward pass to make sure inference works in fp16 with torch.no_grad(): A : List[str] = model(__lowerCamelCase )
17
1
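The `problem_types` loop above exercises regression, single-label, and multi-label settings. As a minimal sketch of how a classification head can pick its loss from such a `problem_type` string (this mirrors the usual transformers convention, not DeiT's exact code; all names below are illustrative):

import torch
from torch import nn

def pick_loss(problem_type: str, logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
    # The three strings match the configurations exercised by the test above.
    if problem_type == "regression":
        return nn.MSELoss()(logits.squeeze(), labels.squeeze())
    if problem_type == "single_label_classification":
        return nn.CrossEntropyLoss()(logits.view(-1, logits.size(-1)), labels.view(-1))
    if problem_type == "multi_label_classification":
        return nn.BCEWithLogitsLoss()(logits, labels)
    raise ValueError(f"unknown problem_type: {problem_type}")

logits = torch.randn(4, 2)
print(pick_loss("multi_label_classification", logits, torch.ones(4, 2)))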
import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict: A : Any = "ylacombe/bark-small" A : Dict = tempfile.mkdtemp() A : Dict = "en_speaker_1" A : List[str] = "This is a test string" A : Any = "speaker_embeddings_path.json" A : int = "speaker_embeddings" def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , **__lowerCamelCase : Any ) -> Any: return AutoTokenizer.from_pretrained(self.checkpoint , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Tuple: shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int: A : Any = self.get_tokenizer() A : Optional[int] = BarkProcessor(tokenizer=__lowerCamelCase ) processor.save_pretrained(self.tmpdirname ) A : str = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[Any]: A : Any = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) A : Optional[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) A : Optional[int] = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> str: A : Any = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) A : Tuple = 35 A : Dict = 2 A : Dict = 8 A : str = { "semantic_prompt": np.ones(__lowerCamelCase ), "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ), "fine_prompt": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset A : int = processor(text=self.input_string , voice_preset=__lowerCamelCase ) A : List[Any] = inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__lowerCamelCase , np.array([] ) ).tolist() ) # test loading voice preset from npz file A : Union[str, Any] = os.path.join(self.tmpdirname , "file.npz" ) np.savez(__lowerCamelCase , **__lowerCamelCase ) A : Optional[int] = processor(text=self.input_string , voice_preset=__lowerCamelCase ) A : Union[str, Any] = inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__lowerCamelCase , np.array([] ) ).tolist() ) # test loading voice preset from the hub A : List[Any] = processor(text=self.input_string , voice_preset=self.voice_preset ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Tuple: A : str = self.get_tokenizer() A : Optional[int] = BarkProcessor(tokenizer=__lowerCamelCase ) A : List[Any] = processor(text=self.input_string ) A : List[Any] = tokenizer( self.input_string , padding="max_length" , max_length=2_56 , add_special_tokens=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , ) for key in 
encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
17
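The voice-preset test above round-trips prompt arrays through an `.npz` file. A small, self-contained sketch of that round-trip (file name and keys are illustrative):

import os
import tempfile

import numpy as np

voice_preset = {
    "semantic_prompt": np.ones(35),
    "coarse_prompt": np.ones((2, 35)),
    "fine_prompt": np.ones((8, 35)),
}

with tempfile.TemporaryDirectory() as tmpdir:
    path = os.path.join(tmpdir, "file.npz")
    np.savez(path, **voice_preset)          # one array per keyword argument
    loaded = np.load(path)                  # NpzFile, indexable by key
    assert all(np.array_equal(voice_preset[k], loaded[k]) for k in voice_preset)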
from sklearn.metrics import recall_score import datasets __SCREAMING_SNAKE_CASE = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ __SCREAMING_SNAKE_CASE = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`. - **zero_division** (`string` or `int`): Sets the value to return when there is a zero division. Defaults to `'warn'`. - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. 
>>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ __SCREAMING_SNAKE_CASE = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("int32" ) ), "references": datasets.Sequence(datasets.Value("int32" ) ), } if self.config_name == "multilabel" else { "predictions": datasets.Value("int32" ), "references": datasets.Value("int32" ), } ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , ) def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Tuple="binary" , __lowerCamelCase : Tuple=None , __lowerCamelCase : Tuple="warn" , ) -> Optional[Any]: A : str = recall_score( __lowerCamelCase , __lowerCamelCase , labels=__lowerCamelCase , pos_label=__lowerCamelCase , average=__lowerCamelCase , sample_weight=__lowerCamelCase , zero_division=__lowerCamelCase , ) return {"recall": float(__lowerCamelCase ) if score.size == 1 else score}
17
1
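The metric above wraps sklearn's `recall_score`; for intuition, the docstring's Example 1 can be reproduced by hand from Recall = TP / (TP + FN). A plain-Python sketch (`recall_binary` is an illustrative name, not part of the metric's API):

def recall_binary(references, predictions, pos_label=1):
    # TP: positives predicted as positive; FN: positives predicted as something else.
    tp = sum(r == pos_label and p == pos_label for r, p in zip(references, predictions))
    fn = sum(r == pos_label and p != pos_label for r, p in zip(references, predictions))
    return tp / (tp + fn)

print(recall_binary([0, 0, 1, 1, 1], [0, 1, 0, 1, 1]))  # 0.6666..., matching Example 1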
import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class lowerCamelCase_ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : int , __lowerCamelCase : Dict[str, int] , __lowerCamelCase : List[str] , __lowerCamelCase : int = None , __lowerCamelCase : int = None ) -> str: super().__init__() A : Optional[Any] = pad_token_id A : Dict = max_length A : Tuple = vocab A : str = merges A : int = BytePairTokenizer(__lowerCamelCase , __lowerCamelCase , sequence_length=__lowerCamelCase ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Any , __lowerCamelCase : GPTaTokenizer , *__lowerCamelCase : List[str] , **__lowerCamelCase : Optional[int] ) -> List[str]: A : Any = [" ".join(__lowerCamelCase ) for m in tokenizer.bpe_ranks.keys()] A : List[Any] = tokenizer.get_vocab() return cls(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , __lowerCamelCase : Union[str, os.PathLike] , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Tuple ) -> int: A : List[Any] = GPTaTokenizer.from_pretrained(__lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase ) return cls.from_tokenizer(__lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , __lowerCamelCase : List[Any] ) -> Dict: return cls(**__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Dict: return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : int = None ) -> int: A : Optional[Any] = self.tf_tokenizer(__lowerCamelCase ) A : int = tf.ones_like(__lowerCamelCase ) if self.pad_token_id is not None: # pad the tokens up to max length A : Optional[int] = max_length if max_length is not None else self.max_length if max_length is not None: A , A : List[str] = pad_model_inputs( __lowerCamelCase , max_seq_length=__lowerCamelCase , pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids}
17
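The layer above delegates padding to tensorflow_text's `pad_model_inputs`. A rough, framework-free sketch of the assumed behavior, padding to a fixed length with a pad id while building an attention mask (`pad_with_mask` is an illustrative name):

def pad_with_mask(token_ids, max_length, pad_token_id):
    ids = token_ids[:max_length]                       # truncate if too long
    mask = [1] * len(ids) + [0] * (max_length - len(ids))  # 1 = real token, 0 = padding
    ids = ids + [pad_token_id] * (max_length - len(ids))
    return ids, mask

print(pad_with_mask([5, 7, 9], 6, 0))  # ([5, 7, 9, 0, 0, 0], [1, 1, 1, 0, 0, 0])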
from collections import deque

from .hash_table import HashTable


class lowerCamelCase_ ( _A ):
    '''simple docstring'''

    def __init__( self : Union[str, Any] , *__lowerCamelCase : Dict , **__lowerCamelCase : int ) -> Optional[int]:
        super().__init__(*__lowerCamelCase , **__lowerCamelCase )

    def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
        A : Optional[Any] = deque([] ) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(__lowerCamelCase )
        A : Dict = self.values[key]

    def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
        return (
            sum(self.charge_factor - len(__lowerCamelCase ) for slot in self.values )
            / self.size_table
            * self.charge_factor
        )

    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : List[Any]=None ) -> Optional[int]:
        if not (
            len(self.values[key] ) == self.charge_factor
            and self.values.count(__lowerCamelCase ) == 0
        ):
            return key
        return super()._collision_resolution(__lowerCamelCase , __lowerCamelCase )
17
1
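The table above turns each slot into a deque so that one slot can hold several values. A toy sketch of that bucket-per-slot idea (all names illustrative):

from collections import deque

class TinyChainedTable:
    def __init__(self, size):
        self.size = size
        self.slots = [None] * size
    def insert(self, key, value):
        idx = hash(key) % self.size
        if self.slots[idx] is None:
            self.slots[idx] = deque()      # lazily create the bucket
        self.slots[idx].appendleft(value)  # newest value at the front

table = TinyChainedTable(4)
table.insert("a", 1)
table.insert("a", 2)
print(table.slots[hash("a") % 4])  # deque([2, 1])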
def UpperCAmelCase ( _lowerCamelCase ):
    if num <= 0:
        raise ValueError("Input must be a positive integer" )
    A : Dict = [True] * (num + 1)
    A : Dict = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p , num + 1 , _lowerCamelCase ):
                A : List[Any] = False
        p += 1
    return [prime for prime in range(2 , num + 1 ) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    __SCREAMING_SNAKE_CASE = int(input("""Enter a positive integer: """).strip())
    print(prime_sieve_eratosthenes(user_num))
17
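A compact, self-contained restatement of the sieve above with explicit names, used as a quick sanity check:

def sieve(num):
    # Mark every composite starting from p*p; what remains True is prime.
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [n for n in range(2, num + 1) if primes[n]]

assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]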
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class lowerCamelCase_ : '''simple docstring''' @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str: return self.get_dummy_input() @property def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : int=False , __lowerCamelCase : int=False , __lowerCamelCase : Optional[int]=False , ) -> Dict: A : Optional[Any] = 4 A : List[str] = 32 A : Any = (32, 32) A : str = torch.manual_seed(0 ) A : int = torch.device(__lowerCamelCase ) A : List[str] = (batch_size, num_channels) + sizes A : Dict = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase ) A : int = {"hidden_states": hidden_states} if include_temb: A : Any = 1_28 A : List[str] = randn_tensor((batch_size, temb_channels) , generator=__lowerCamelCase , device=__lowerCamelCase ) if include_res_hidden_states_tuple: A : str = torch.manual_seed(1 ) A : Tuple = (randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase ),) if include_encoder_hidden_states: A : Dict = floats_tensor((batch_size, 32, 32) ).to(__lowerCamelCase ) if include_skip_sample: A : Optional[int] = randn_tensor(((batch_size, 3) + sizes) , generator=__lowerCamelCase , device=__lowerCamelCase ) return dummy_input def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]: A : Dict = { "in_channels": 32, "out_channels": 32, "temb_channels": 1_28, } if self.block_type == "up": A : Dict = 32 if self.block_type == "mid": init_dict.pop("out_channels" ) A : str = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Optional[int] ) -> Union[str, Any]: A , A : str = self.prepare_init_args_and_inputs_for_common() A : List[Any] = self.block_class(**__lowerCamelCase ) unet_block.to(__lowerCamelCase ) unet_block.eval() with torch.no_grad(): A : int = unet_block(**__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): A : Union[str, Any] = output[0] self.assertEqual(output.shape , self.output_shape ) A : Any = output[0, -1, -3:, -3:] A : Union[str, Any] = torch.tensor(__lowerCamelCase ).to(__lowerCamelCase ) assert torch_all_close(output_slice.flatten() , __lowerCamelCase , atol=5e-3 ) @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: A , A : Tuple = self.prepare_init_args_and_inputs_for_common() A : str = self.block_class(**__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : Optional[int] = model(**__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): A : Optional[Any] = output[0] A : List[str] = torch.device(__lowerCamelCase ) A : List[str] = randn_tensor(output.shape , device=__lowerCamelCase ) A : Dict = torch.nn.functional.mse_loss(__lowerCamelCase , __lowerCamelCase ) loss.backward()
17
1
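The dummy inputs above pin randomness by passing a seeded generator into `randn_tensor`. A minimal sketch of seeded, reproducible sampling in plain torch:

import torch

gen_a = torch.Generator().manual_seed(0)
gen_b = torch.Generator().manual_seed(0)
x = torch.randn((4, 32, 32, 32), generator=gen_a)
y = torch.randn((4, 32, 32, 32), generator=gen_b)
assert torch.equal(x, y)  # identical seeds yield identical draws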
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = {"""vocab_file""": """spiece.model"""} __SCREAMING_SNAKE_CASE = { """vocab_file""": { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""", } } __SCREAMING_SNAKE_CASE = { """xlnet-base-cased""": None, """xlnet-large-cased""": None, } # Segments (not really needed) __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 2 __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = 4 class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = "left" def __init__( self : List[str] , __lowerCamelCase : Any , __lowerCamelCase : str=False , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]=False , __lowerCamelCase : List[str]="<s>" , __lowerCamelCase : int="</s>" , __lowerCamelCase : str="<unk>" , __lowerCamelCase : int="<sep>" , __lowerCamelCase : Optional[Any]="<pad>" , __lowerCamelCase : Tuple="<cls>" , __lowerCamelCase : Optional[Any]="<mask>" , __lowerCamelCase : int=["<eop>", "<eod>"] , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Optional[int] , ) -> None: # Mask token behave like a normal word, i.e. include the space before it A : str = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token A : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__lowerCamelCase , remove_space=__lowerCamelCase , keep_accents=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , ) A : int = 3 A : Union[str, Any] = do_lower_case A : List[Any] = remove_space A : List[Any] = keep_accents A : str = vocab_file A : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowerCamelCase ) @property def SCREAMING_SNAKE_CASE__ ( self : str ) -> int: return len(self.sp_model ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: A : str = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Tuple ) -> Optional[Any]: A : Optional[int] = self.__dict__.copy() A : Dict = None return state def __setstate__( self : Any , __lowerCamelCase : Any ) -> Dict: A : Union[str, Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): A : Optional[int] = {} A : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : int ) -> Any: if self.remove_space: A : Union[str, Any] = " ".join(inputs.strip().split() ) else: A : str = inputs A : Dict = outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: 
A : Optional[int] = unicodedata.normalize("NFKD" , __lowerCamelCase ) A : Tuple = "".join([c for c in outputs if not unicodedata.combining(__lowerCamelCase )] ) if self.do_lower_case: A : Tuple = outputs.lower() return outputs def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : str ) -> List[str]: A : Dict = self.preprocess_text(__lowerCamelCase ) A : Tuple = self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase ) A : Any = [] for piece in pieces: if len(__lowerCamelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): A : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowerCamelCase , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: A : Optional[Any] = cur_pieces[1:] else: A : List[str] = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__lowerCamelCase ) else: new_pieces.append(__lowerCamelCase ) return new_pieces def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : Tuple ) -> Dict: return self.sp_model.PieceToId(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Optional[int] ) -> List[str]: return self.sp_model.IdToPiece(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Tuple ) -> Tuple: A : Any = "".join(__lowerCamelCase ).replace(__lowerCamelCase , " " ).strip() return out_string def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : List[int] , __lowerCamelCase : bool = False , __lowerCamelCase : bool = None , __lowerCamelCase : bool = True , **__lowerCamelCase : Union[str, Any] , ) -> str: A : Any = kwargs.pop("use_source_tokenizer" , __lowerCamelCase ) A : Any = self.convert_ids_to_tokens(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 A : Dict = [] A : Dict = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__lowerCamelCase ) ) A : Optional[Any] = [] sub_texts.append(__lowerCamelCase ) else: current_sub_text.append(__lowerCamelCase ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__lowerCamelCase ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens A : Any = "".join(__lowerCamelCase ) A : Optional[int] = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: A : Dict = self.clean_up_tokenization(__lowerCamelCase ) return clean_text else: return text def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: A : Dict = [self.sep_token_id] A : Dict = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is not None: return ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1, 1] return ([0] * len(__lowerCamelCase )) + [1, 1] def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: A : Optional[Any] = [self.sep_token_id] A : Dict = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCamelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return A : str = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCamelCase , "wb" ) as fi: A : Tuple = self.sp_model.serialized_model_proto() fi.write(__lowerCamelCase ) return (out_vocab_file,)
17
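The `keep_accents=False` path above NFKD-decomposes the text and drops combining marks. The same idea in isolation:

import unicodedata

def strip_accents(text: str) -> str:
    # NFKD splits accented characters into base character + combining mark.
    decomposed = unicodedata.normalize("NFKD", text)
    return "".join(c for c in decomposed if not unicodedata.combining(c))

print(strip_accents("déjà vu"))  # deja vu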
import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs (`Dict[str, Any]`, *optional*): Additional stopping criteria specific kwargs. Return: `bool`. `False` indicates we should continue, `True` indicates we should stop. """ class lowerCamelCase_ ( _A ): '''simple docstring''' @add_start_docstrings(__lowerCamelCase ) def __call__( self : Optional[int] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Optional[Any] ) -> bool: raise NotImplementedError("StoppingCriteria needs to be subclassed" ) class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] = None ) -> List[Any]: A : str = max_length A : Optional[int] = max_position_embeddings @add_start_docstrings(__lowerCamelCase ) def __call__( self : List[str] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Any ) -> bool: A : List[Any] = input_ids.shape[-1] A : Any = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( "This is a friendly reminder - the current text generation call will exceed the model's predefined " F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """ "exceptions, performance degradation, or nothing at all." ) return is_done class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : int ) -> List[Any]: warnings.warn( "The class `MaxNewTokensCriteria` is deprecated. " F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """ "with `max_length = start_length + max_new_tokens` instead." 
, __lowerCamelCase , ) A : str = start_length A : Optional[Any] = max_new_tokens A : Dict = start_length + max_new_tokens @add_start_docstrings(__lowerCamelCase ) def __call__( self : int , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool: return input_ids.shape[-1] >= self.max_length class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : Optional[int] , __lowerCamelCase : float , __lowerCamelCase : Optional[float] = None ) -> List[Any]: A : str = max_time A : Dict = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(__lowerCamelCase ) def __call__( self : Any , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool: return time.time() - self.initial_timestamp > self.max_time class lowerCamelCase_ ( _A ): '''simple docstring''' @add_start_docstrings(__lowerCamelCase ) def __call__( self : Union[str, Any] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : int ) -> bool: return any(criteria(__lowerCamelCase , __lowerCamelCase ) for criteria in self ) @property def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]: for stopping_criterium in self: if isinstance(__lowerCamelCase , __lowerCamelCase ): return stopping_criterium.max_length elif isinstance(__lowerCamelCase , __lowerCamelCase ): return stopping_criterium.max_length return None def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : Optional[int] = stopping_criteria.max_length A : Any = deepcopy(_lowerCamelCase ) if stopping_max_length is not None and stopping_max_length != max_length: warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , _lowerCamelCase ) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=_lowerCamelCase ) ) return new_stopping_criteria
17
1
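Each stopping criterion above is a callable that returns `True` when generation should halt. A standalone mimic of the `MaxLengthCriteria` pattern (illustrative class name, no transformers dependency):

import torch

class MaxLengthStop:
    def __init__(self, max_length: int):
        self.max_length = max_length
    def __call__(self, input_ids: torch.LongTensor, scores=None) -> bool:
        # Stop once the generated sequence reaches max_length tokens.
        return input_ids.shape[-1] >= self.max_length

stop = MaxLengthStop(max_length=4)
print(stop(torch.ones((1, 3), dtype=torch.long)))  # False: keep generating
print(stop(torch.ones((1, 4), dtype=torch.long)))  # True: stop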
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): for attribute in key.split("." ): A : List[Any] = getattr(_lowerCamelCase , _lowerCamelCase ) if weight_type is not None: A : str = getattr(_lowerCamelCase , _lowerCamelCase ).shape else: A : Optional[int] = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": A : str = value elif weight_type == "weight_g": A : Dict = value elif weight_type == "weight_v": A : List[str] = value elif weight_type == "bias": A : str = value else: A : Union[str, Any] = value logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): A : List[Any] = [] A : Optional[int] = fairseq_model.state_dict() A : Any = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): A : int = False if "conv_layers" in name: load_conv_layer( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , ) A : Optional[Any] = True else: for key, mapped_key in MAPPING.items(): A : Dict = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key if key in name or (key.split("w2v_model." )[-1] == name.split("." )[0] and not is_finetuned): A : str = True if "*" in mapped_key: A : str = name.split(_lowerCamelCase )[0].split("." )[-2] A : Tuple = mapped_key.replace("*" , _lowerCamelCase ) if "weight_g" in name: A : List[str] = "weight_g" elif "weight_v" in name: A : int = "weight_v" elif "weight" in name: A : Tuple = "weight" elif "bias" in name: A : int = "bias" else: A : Dict = None set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) continue if not is_used: unused_weights.append(_lowerCamelCase ) logger.warning(f"""Unused weights: {unused_weights}""" ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): A : str = full_name.split("conv_layers." )[-1] A : Union[str, Any] = name.split("." 
) A : List[Any] = int(items[0] ) A : str = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) A : Any = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) A : Optional[Any] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." ) A : Optional[Any] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) A : List[Any] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_lowerCamelCase ) @torch.no_grad() def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ): if config_path is not None: A : List[Any] = HubertConfig.from_pretrained(_lowerCamelCase ) else: A : List[Any] = HubertConfig() if is_finetuned: if dict_path: A : Optional[int] = Dictionary.load(_lowerCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq A : Dict = target_dict.pad_index A : Any = target_dict.bos_index A : Tuple = target_dict.eos_index A : int = len(target_dict.symbols ) A : Tuple = os.path.join(_lowerCamelCase , "vocab.json" ) if not os.path.isdir(_lowerCamelCase ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) ) return os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle: json.dump(target_dict.indices , _lowerCamelCase ) A : Optional[Any] = WavaVecaCTCTokenizer( _lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , ) A : Dict = True if config.feat_extract_norm == "layer" else False A : Tuple = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) A : int = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase ) processor.save_pretrained(_lowerCamelCase ) A : str = HubertForCTC(_lowerCamelCase ) else: A : int = HubertModel(_lowerCamelCase ) if is_finetuned: A , A , A : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: A , A , A : List[Any] = 
fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) A : Optional[Any] = model[0].eval() recursively_load_weights(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) hf_wavavec.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) __SCREAMING_SNAKE_CASE = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
17
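The weight-loading code above walks dotted keys with repeated `getattr` before assigning tensors. That traversal in isolation (illustrative names):

from types import SimpleNamespace

def get_by_path(obj, dotted_key):
    # Follow each attribute in "a.b.c" order, ending at the leaf.
    for attr in dotted_key.split("."):
        obj = getattr(obj, attr)
    return obj

model = SimpleNamespace(encoder=SimpleNamespace(layer_norm=SimpleNamespace(weight=1.0)))
print(get_by_path(model, "encoder.layer_norm.weight"))  # 1.0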
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = "x" , _lowerCamelCase = 10**-10 , _lowerCamelCase = 1 , ):
    A : str = symbols(_lowerCamelCase )
    A : int = lambdify(_lowerCamelCase , _lowerCamelCase )
    A : List[str] = lambdify(_lowerCamelCase , diff(_lowerCamelCase , _lowerCamelCase ) )
    A : Optional[int] = starting_point
    while True:
        if diff_function(_lowerCamelCase ) != 0:
            A : Optional[Any] = prev_guess - multiplicity * func(_lowerCamelCase ) / diff_function(
                _lowerCamelCase )
        else:
            raise ZeroDivisionError("Could not find root" ) from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess
        A : int = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
    # Find root of polynomial
    # Find fourth Root of 5
    print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
    # Find value of e
    print(
        """The root of log(y) - 1 = 0 is """,
        F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
    )
    # Exponential Roots
    print(
        """The root of exp(x) - 1 = 0 is""",
        F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
    )
    # Find root of cos(x)
    print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
17
1
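The sympy-free core of the iteration above is x_next = x - multiplicity * f(x) / f'(x), stopping once consecutive guesses agree to within the requested precision. A self-contained sketch (`newton_step_solver` is an illustrative name):

def newton_step_solver(func, d_func, x0, precision=1e-10, multiplicity=1, max_iter=100):
    prev = x0
    for _ in range(max_iter):
        slope = d_func(prev)
        if slope == 0:
            raise ZeroDivisionError("Could not find root")
        nxt = prev - multiplicity * func(prev) / slope
        if abs(nxt - prev) < precision:  # consecutive guesses agree: converged
            return nxt
        prev = nxt
    raise RuntimeError("did not converge")

print(newton_step_solver(lambda x: x * x - 2, lambda x: 2 * x, 1.0))  # ~1.41421356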
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __SCREAMING_SNAKE_CASE = { """configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""], """configuration_data2vec_text""": [ """DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecTextConfig""", """Data2VecTextOnnxConfig""", ], """configuration_data2vec_vision""": [ """DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecVisionConfig""", """Data2VecVisionOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecAudioForAudioFrameClassification""", """Data2VecAudioForCTC""", """Data2VecAudioForSequenceClassification""", """Data2VecAudioForXVector""", """Data2VecAudioModel""", """Data2VecAudioPreTrainedModel""", ] __SCREAMING_SNAKE_CASE = [ """DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecTextForCausalLM""", """Data2VecTextForMaskedLM""", """Data2VecTextForMultipleChoice""", """Data2VecTextForQuestionAnswering""", """Data2VecTextForSequenceClassification""", """Data2VecTextForTokenClassification""", """Data2VecTextModel""", """Data2VecTextPreTrainedModel""", ] __SCREAMING_SNAKE_CASE = [ """DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecVisionForImageClassification""", """Data2VecVisionForMaskedImageModeling""", """Data2VecVisionForSemanticSegmentation""", """Data2VecVisionModel""", """Data2VecVisionPreTrainedModel""", ] if is_tf_available(): __SCREAMING_SNAKE_CASE = [ """TFData2VecVisionForImageClassification""", """TFData2VecVisionForSemanticSegmentation""", """TFData2VecVisionModel""", """TFData2VecVisionPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, 
module_spec=__spec__)
17
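The `_LazyModule` wiring above defers heavy imports until an exported name is first accessed. A toy stand-in for that idea (this is not the transformers implementation; all names are illustrative):

import importlib

class TinyLazyModule:
    def __init__(self, import_structure):
        # Invert {module: [names]} into {name: module} for lookup.
        self._name_to_module = {
            name: module for module, names in import_structure.items() for name in names
        }
    def __getattr__(self, name):
        module_name = self._name_to_module.get(name)
        if module_name is None:
            raise AttributeError(name)
        value = getattr(importlib.import_module(module_name), name)
        setattr(self, name, value)  # cache so __getattr__ is not hit again
        return value

lazy = TinyLazyModule({"math": ["sqrt"], "json": ["dumps"]})
print(lazy.sqrt(9.0))  # 3.0 -- math was imported only on this access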
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} __SCREAMING_SNAKE_CASE = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } __SCREAMING_SNAKE_CASE = { """allenai/led-base-16384""": 16384, } class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = LEDTokenizer a__ = ["input_ids", "attention_mask"] def __init__( self : int , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : str="replace" , __lowerCamelCase : Optional[int]="<s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Dict="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : Union[str, Any]="<pad>" , __lowerCamelCase : Dict="<mask>" , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : str=True , **__lowerCamelCase : Union[str, Any] , ) -> Optional[int]: super().__init__( __lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase , **__lowerCamelCase , ) A : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space: A : Any = getattr(__lowerCamelCase , pre_tok_state.pop("type" ) ) A : Any = add_prefix_space A : Tuple = pre_tok_class(**__lowerCamelCase ) A : str = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` A : List[str] = "post_processor" A : Union[str, Any] = getattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) if tokenizer_component_instance: A : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A : Union[str, Any] = tuple(state["sep"] ) if "cls" in state: A : str = tuple(state["cls"] ) A : int = False if state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space: A : List[Any] = add_prefix_space A : Dict = True if state.get("trim_offsets" , __lowerCamelCase ) != trim_offsets: A : Dict = trim_offsets A : str = True if changes_to_apply: A : int = getattr(__lowerCamelCase , state.pop("type" ) ) A : Dict = component_class(**__lowerCamelCase ) setattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) @property # Copied from 
transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Any ) -> Dict: A : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else value A : Tuple = value def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[str] ) -> BatchEncoding: A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , *__lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ) -> BatchEncoding: A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]: A : Optional[Any] = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase ) return tuple(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any]=None ) -> List[str]: A : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: A : str = [self.sep_token_id] A : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ) -> dict: A : Dict = super()._pad( encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: A : List[Any] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: A : Optional[int] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
A : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase ) if needs_to_be_padded: A : Any = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` A : Tuple = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": A : Tuple = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
17
1
def sum_of_series(first_term: float, common_diff: float, num_of_terms: int) -> float:
    # formula for the sum of an arithmetic series: S_n = (n / 2) * (2a + (n - 1) * d)
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


def main():
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
17
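The snippet above implements the arithmetic-series sum S_n = (n/2)(2a + (n-1)d). A quick sanity check, assuming the restored sum_of_series name from the cleanup above:

assert sum_of_series(1, 1, 10) == 55.0       # 1 + 2 + ... + 10
assert sum_of_series(1, 10, 100) == 49600.0  # a=1, d=10, n=100: (100/2) * (2 + 99*10)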
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
17
1
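The column_mapping property is what lets a dataset with arbitrary column names be remapped onto the fixed question/context/answers schema. A minimal usage sketch, assuming the restored QuestionAnsweringExtractive class above (the column names are illustrative):

template = QuestionAnsweringExtractive(question_column="q", context_column="ctx", answers_column="ans")
print(template.column_mapping)  # {"q": "question", "ctx": "context", "ans": "answers"}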
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any]=13 , __lowerCamelCase : Tuple=7 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : int=True , __lowerCamelCase : int=99 , __lowerCamelCase : Tuple=32 , __lowerCamelCase : Optional[Any]=5 , __lowerCamelCase : Dict=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : Union[str, Any]="gelu" , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : List[str]=5_12 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : Tuple=None , ) -> Tuple: A : Optional[Any] = parent A : int = batch_size A : Dict = seq_length A : Any = is_training A : Tuple = use_input_mask A : Optional[int] = use_token_type_ids A : Union[str, Any] = use_labels A : Dict = vocab_size A : str = hidden_size A : int = num_hidden_layers A : Optional[Any] = num_attention_heads A : Any = intermediate_size A : Optional[int] = hidden_act A : Optional[Any] = hidden_dropout_prob A : str = attention_probs_dropout_prob A : Dict = max_position_embeddings A : Union[str, Any] = type_vocab_size A : List[str] = type_sequence_label_size A : List[Any] = initializer_range A : Tuple = num_labels A : Union[str, Any] = num_choices A : Tuple = scope def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Tuple = None if self.use_input_mask: A : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) A : Optional[int] = None A : Dict = None A : List[Any] = None if self.use_labels: A : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) A : Optional[Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> str: return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : 
Optional[Any] ) -> Union[str, Any]: A : Optional[Any] = DistilBertModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Dict = model(__lowerCamelCase , __lowerCamelCase ) A : Dict = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : List[str] ) -> Optional[Any]: A : Dict = DistilBertForMaskedLM(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : int = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] ) -> Optional[int]: A : Dict = DistilBertForQuestionAnswering(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Optional[int] = model( __lowerCamelCase , attention_mask=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int ) -> Optional[int]: A : Tuple = self.num_labels A : int = DistilBertForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] ) -> Optional[Any]: A : Optional[int] = self.num_labels A : List[str] = DistilBertForTokenClassification(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : List[str] = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : int ) -> Any: A : Union[str, Any] = self.num_choices A : Optional[Any] = DistilBertForMultipleChoice(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A : Union[str, Any] = model( __lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Union[str, Any]: A : Optional[Any] = 
self.prepare_config_and_inputs() ((A) , (A) , (A) , (A) , (A) , (A)) : int = config_and_inputs A : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) a__ = ( { "feature-extraction": DistilBertModel, "fill-mask": DistilBertForMaskedLM, "question-answering": DistilBertForQuestionAnswering, "text-classification": DistilBertForSequenceClassification, "token-classification": DistilBertForTokenClassification, "zero-shot": DistilBertForSequenceClassification, } if is_torch_available() else {} ) a__ = True a__ = True a__ = True a__ = True def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]: A : Optional[int] = DistilBertModelTester(self ) A : List[str] = ConfigTester(self , config_class=__lowerCamelCase , dim=37 ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: A : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any: A : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Union[str, Any]: A : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]: A : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[int]: A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*__lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> str: for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Optional[Any] = DistilBertModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @slow @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[Any]: A , A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == DistilBertForMultipleChoice: return A : List[str] = True A : int = model_class(config=__lowerCamelCase ) A : Dict = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) A : str = torch.jit.trace( __lowerCamelCase , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(__lowerCamelCase , os.path.join(__lowerCamelCase , "traced_model.pt" ) ) A : List[str] = torch.jit.load(os.path.join(__lowerCamelCase , "traced_model.pt" ) , map_location=__lowerCamelCase ) loaded(inputs_dict["input_ids"].to(__lowerCamelCase ) , inputs_dict["attention_mask"].to(__lowerCamelCase ) ) @require_torch class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE__ ( self : int ) -> Tuple: A : List[Any] = DistilBertModel.from_pretrained("distilbert-base-uncased" ) A : Union[str, Any] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) A : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): A : Union[str, Any] = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0] A : Optional[Any] = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , __lowerCamelCase ) A : List[str] = torch.tensor( [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1e-4 ) )
17
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class lowerCamelCase_ : '''simple docstring''' def __init__( self : int , __lowerCamelCase : Any , __lowerCamelCase : Dict=3 , __lowerCamelCase : Dict=32 , __lowerCamelCase : Any=3 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : str=[8, 16, 32, 64] , __lowerCamelCase : Dict=[1, 1, 2, 1] , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : str=3 , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase : Tuple=[2, 3, 4] , __lowerCamelCase : Any=1 , ) -> int: A : Optional[int] = parent A : List[str] = batch_size A : Tuple = image_size A : List[str] = num_channels A : List[str] = embeddings_size A : List[str] = hidden_sizes A : str = depths A : Optional[Any] = is_training A : int = use_labels A : Optional[int] = hidden_act A : List[Any] = num_labels A : List[str] = scope A : str = len(__lowerCamelCase ) A : Optional[int] = out_features A : str = out_indices A : Optional[int] = num_groups def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: A : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Optional[int] = None if self.use_labels: A : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels ) A : Tuple = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ) -> Optional[int]: A : Any = BitModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : List[Any] = model(__lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : Dict ) -> Tuple: A : Union[str, Any] = self.num_labels A : List[str] = BitForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : str = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ) -> 
List[Any]: A : Dict = BitBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Optional[Any] = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None A : Optional[Any] = None A : Optional[int] = BitBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Any = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Dict: A : List[str] = self.prepare_config_and_inputs() A , A , A : Tuple = config_and_inputs A : Tuple = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () a__ = ( {"feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: A : Any = BitModelTester(self ) A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: return @unittest.skip(reason="Bit does not output attentions" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: pass @unittest.skip(reason="Bit does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]: pass @unittest.skip(reason="Bit does not support input and output embeddings" ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]: pass def SCREAMING_SNAKE_CASE__ ( self : int ) -> str: A , A : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Dict = model_class(__lowerCamelCase ) A : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Optional[Any] = [*signature.parameters.keys()] A : List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]: A : int = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]: A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Optional[int] = model_class(config=__lowerCamelCase ) for name, module in model.named_modules(): if isinstance(__lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: def check_hidden_states_output(__lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] ): A : Dict = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): A : List[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) A : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A : List[Any] = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() A : Dict = ["preactivation", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: A : Dict = layer_type A : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) @unittest.skip(reason="Bit does not use feedforward chunking" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: pass def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]: A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]: for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Optional[Any] = BitModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]: return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: A : Union[str, Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCamelCase ) A : List[Any] = self.default_image_processor A : List[Any] = prepare_img() A : Tuple = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): A : Union[str, Any] = model(**__lowerCamelCase ) # verify the logits A : str = torch.Size((1, 
10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : Optional[Any] = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) @require_torch class lowerCamelCase_ ( _A ,unittest.TestCase ): '''simple docstring''' a__ = (BitBackbone,) if is_torch_available() else () a__ = BitConfig a__ = False def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: A : Union[str, Any] = BitModelTester(self )
17
1
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
17
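The loop above is the extended Euclidean algorithm: it maintains the invariant u3 = u1*a + u2*m (and likewise for the v's), so when v3 reaches 0, u1 is the inverse of a modulo m. A small check, assuming the restored find_mod_inverse name:

assert find_mod_inverse(7, 26) == 15          # 7 * 15 = 105 = 4 * 26 + 1
assert (7 * find_mod_inverse(7, 26)) % 26 == 1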
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
17
1
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
17
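The first test relies on FileLock.acquire(timeout) raising Timeout while another FileLock instance on the same path holds the lock. A minimal standalone sketch of that behavior, assuming the same FileLock/Timeout imports (the path is illustrative):

from datasets.utils.filelock import FileLock, Timeout

lock = FileLock("/tmp/demo.lock")  # illustrative path
with lock.acquire():
    try:
        FileLock("/tmp/demo.lock").acquire(timeout=0.01)
    except Timeout:
        print("second acquire timed out, as the tests above expect")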
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase_ : '''simple docstring''' def __init__( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Optional[int]=10 , __lowerCamelCase : Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase : Tuple=[1, 1, 2, 1] , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , ) -> str: A : Optional[Any] = parent A : Optional[int] = batch_size A : List[str] = image_size A : List[str] = num_channels A : Tuple = embeddings_size A : Optional[int] = hidden_sizes A : Dict = depths A : Optional[int] = is_training A : List[str] = use_labels A : List[Any] = hidden_act A : Optional[int] = num_labels A : int = scope A : List[Any] = len(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[Any]: A : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Optional[Any] = None if self.use_labels: A : Any = ids_tensor([self.batch_size] , self.num_labels ) A : List[Any] = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Tuple: A : List[str] = TFRegNetModel(config=__lowerCamelCase ) A : str = model(__lowerCamelCase , training=__lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Tuple ) -> List[str]: A : List[Any] = self.num_labels A : int = TFRegNetForImageClassification(__lowerCamelCase ) A : str = model(__lowerCamelCase , labels=__lowerCamelCase , training=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: A : Any = self.prepare_config_and_inputs() A , A , A : str = config_and_inputs A : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () a__ = ( {"feature-extraction": TFRegNetModel, 
"image-classification": TFRegNetForImageClassification} if is_tf_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: A : Optional[Any] = TFRegNetModelTester(self ) A : int = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Tuple: return @unittest.skip(reason="RegNet does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) @slow def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: super().test_keras_fit() @unittest.skip(reason="RegNet does not support input and output embeddings" ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Union[str, Any]: pass def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple: A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Union[str, Any] = model_class(__lowerCamelCase ) A : int = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Union[str, Any] = [*signature.parameters.keys()] A : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple: A : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: def check_hidden_states_output(__lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ): A : int = model_class(__lowerCamelCase ) A : int = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) , training=__lowerCamelCase ) A : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A : Dict = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) A , A : int = self.model_tester.prepare_config_and_inputs_for_common() A : str = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: A : List[str] = layer_type A : List[Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: A , A : Tuple = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(__lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]={} ): A : Optional[int] = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ) A : int = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ).to_tuple() def recursive_check(__lowerCamelCase : List[str] , __lowerCamelCase : Any ): if isinstance(__lowerCamelCase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in 
zip(__lowerCamelCase , __lowerCamelCase ): recursive_check(__lowerCamelCase , __lowerCamelCase ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(__lowerCamelCase , __lowerCamelCase ) ) , msg=( "Tuple and dict output are not equal. Difference:" F""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ) , ) recursive_check(__lowerCamelCase , __lowerCamelCase ) for model_class in self.all_model_classes: A : Tuple = model_class(__lowerCamelCase ) A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} ) A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: A : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Union[str, Any] = TFRegNetModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]: A : List[Any] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) A : Optional[int] = self.default_image_processor A : List[Any] = prepare_img() A : str = image_processor(images=__lowerCamelCase , return_tensors="tf" ) # forward pass A : List[Any] = model(**__lowerCamelCase , training=__lowerCamelCase ) # verify the logits A : Dict = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : Optional[Any] = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 )
17
1
import operator as op


__SCREAMING_SNAKE_CASE = """scaler.pt"""
__SCREAMING_SNAKE_CASE = """pytorch_model"""
__SCREAMING_SNAKE_CASE = """random_states"""
__SCREAMING_SNAKE_CASE = """optimizer"""
__SCREAMING_SNAKE_CASE = """scheduler"""
__SCREAMING_SNAKE_CASE = """pytorch_model.bin"""
__SCREAMING_SNAKE_CASE = """pytorch_model.bin.index.json"""
__SCREAMING_SNAKE_CASE = """model.safetensors"""
__SCREAMING_SNAKE_CASE = """model.safetensors.index.json"""
__SCREAMING_SNAKE_CASE = """1.10.2"""
__SCREAMING_SNAKE_CASE = """py38"""
__SCREAMING_SNAKE_CASE = """4.17.0"""
__SCREAMING_SNAKE_CASE = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
__SCREAMING_SNAKE_CASE = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
__SCREAMING_SNAKE_CASE = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
__SCREAMING_SNAKE_CASE = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
__SCREAMING_SNAKE_CASE = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
__SCREAMING_SNAKE_CASE = """2.0.1"""
__SCREAMING_SNAKE_CASE = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
__SCREAMING_SNAKE_CASE = ["""default""", """reduce-overhead""", """max-autotune"""]
__SCREAMING_SNAKE_CASE = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}

# These are the args for `torch.distributed.launch` for pytorch < 1.9
__SCREAMING_SNAKE_CASE = [
    """nnodes""",
    """nproc_per_node""",
    """rdzv_backend""",
    """rdzv_endpoint""",
    """rdzv_id""",
    """rdzv_conf""",
    """standalone""",
    """max_restarts""",
    """monitor_interval""",
    """start_method""",
    """role""",
    """module""",
    """m""",
    """no_python""",
    """run_path""",
    """log_dir""",
    """r""",
    """redirects""",
    """t""",
    """tee""",
    """node_rank""",
    """master_addr""",
    """master_port""",
]
__SCREAMING_SNAKE_CASE = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
__SCREAMING_SNAKE_CASE = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
17
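The string-to-operator dict near the end of this constants module is what lets version checks be expressed as data. A hedged sketch of how such a map is typically consumed (compare_versions here is illustrative, not part of the dump):

import operator as op
from packaging import version

STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

def compare_versions(current: str, operation: str, target: str) -> bool:
    # e.g. compare_versions("2.0.1", ">=", "1.10.2") -> True
    return STR_OPERATION_TO_FUNC[operation](version.parse(current), version.parse(target))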
import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = (PNDMScheduler,) a__ = (("num_inference_steps", 50),) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , **__lowerCamelCase : str ) -> Optional[Any]: A : Union[str, Any] = { "num_train_timesteps": 10_00, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**__lowerCamelCase ) return config def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : List[str]=0 , **__lowerCamelCase : Any ) -> Tuple: A : Dict = dict(self.forward_default_kwargs ) A : Dict = kwargs.pop("num_inference_steps" , __lowerCamelCase ) A : Union[str, Any] = self.dummy_sample A : List[Any] = 0.1 * sample A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A : Any = self.get_scheduler_config(**__lowerCamelCase ) A : int = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals A : Tuple = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCamelCase ) A : Dict = scheduler_class.from_pretrained(__lowerCamelCase ) new_scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals A : Tuple = dummy_past_residuals[:] A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : str = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A : int = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[str] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]: pass def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Optional[Any]=0 , **__lowerCamelCase : Tuple ) -> str: A : List[str] = dict(self.forward_default_kwargs ) A : Optional[int] = kwargs.pop("num_inference_steps" , __lowerCamelCase ) A : List[str] = self.dummy_sample A : Any = 0.1 * sample A : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A : Tuple = self.get_scheduler_config() A : Optional[int] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals (must be after setting timesteps) A : Optional[int] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCamelCase ) A : str = scheduler_class.from_pretrained(__lowerCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residual (must be after setting timesteps) A : Optional[Any] = dummy_past_residuals[:] A : Union[str, Any] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : Dict = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A : 
Union[str, Any] = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[Any] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : Tuple , **__lowerCamelCase : Any ) -> Union[str, Any]: A : Optional[Any] = self.scheduler_classes[0] A : List[Any] = self.get_scheduler_config(**__lowerCamelCase ) A : str = scheduler_class(**__lowerCamelCase ) A : List[str] = 10 A : Union[str, Any] = self.dummy_model() A : int = self.dummy_sample_deter scheduler.set_timesteps(__lowerCamelCase ) for i, t in enumerate(scheduler.prk_timesteps ): A : Optional[int] = model(__lowerCamelCase , __lowerCamelCase ) A : Optional[int] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): A : Tuple = model(__lowerCamelCase , __lowerCamelCase ) A : Tuple = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample return sample def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any: A : Union[str, Any] = dict(self.forward_default_kwargs ) A : Union[str, Any] = kwargs.pop("num_inference_steps" , __lowerCamelCase ) for scheduler_class in self.scheduler_classes: A : Dict = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) A : List[Any] = self.dummy_sample A : List[Any] = 0.1 * sample if num_inference_steps is not None and hasattr(__lowerCamelCase , "set_timesteps" ): scheduler.set_timesteps(__lowerCamelCase ) elif num_inference_steps is not None and not hasattr(__lowerCamelCase , "set_timesteps" ): A : List[str] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] A : Tuple = dummy_past_residuals[:] A : Dict = scheduler.step_prk(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[Any] = scheduler.step_prk(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) A : Any = scheduler.step_plms(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : str = scheduler.step_plms(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple: for timesteps in [1_00, 10_00]: self.check_over_configs(num_train_timesteps=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]: for steps_offset in [0, 1]: self.check_over_configs(steps_offset=__lowerCamelCase ) A : Dict = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config(steps_offset=1 ) A : Optional[int] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=__lowerCamelCase , beta_end=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : 
Optional[int] ) -> Union[str, Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]: for t in [1, 5, 10]: self.check_over_forward(time_step=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int: for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ): self.check_over_forward(num_inference_steps=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any: # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 A : str = 27 for scheduler_class in self.scheduler_classes: A : Tuple = self.dummy_sample A : List[Any] = 0.1 * sample A : List[Any] = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : str ) -> int: with self.assertRaises(__lowerCamelCase ): A : Union[str, Any] = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict: A : Optional[Any] = self.full_loop() A : Tuple = torch.sum(torch.abs(__lowerCamelCase ) ) A : Optional[int] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 198.1318 ) < 1e-2 assert abs(result_mean.item() - 0.2580 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any: A : Any = self.full_loop(prediction_type="v_prediction" ) A : Union[str, Any] = torch.sum(torch.abs(__lowerCamelCase ) ) A : List[str] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 67.3986 ) < 1e-2 assert abs(result_mean.item() - 0.0878 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]: # We specify different beta, so that the first alpha is 0.99 A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 ) A : Dict = torch.sum(torch.abs(__lowerCamelCase ) ) A : Any = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 230.0399 ) < 1e-2 assert abs(result_mean.item() - 0.2995 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any: # We specify different beta, so that the first alpha is 0.99 A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 ) A : List[Any] = torch.sum(torch.abs(__lowerCamelCase ) ) A : Optional[Any] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 186.9482 ) < 1e-2 assert abs(result_mean.item() - 0.2434 ) < 1e-3
17
1
class lowerCamelCase_ : '''simple docstring''' def __init__( self : int , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] ) -> str: A : Tuple = None A : Union[str, Any] = None A : int = graph self._normalize_graph(__lowerCamelCase , __lowerCamelCase ) A : Optional[Any] = len(__lowerCamelCase ) A : Any = None def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] ) -> str: if sources is int: A : str = [sources] if sinks is int: A : int = [sinks] if len(__lowerCamelCase ) == 0 or len(__lowerCamelCase ) == 0: return A : Tuple = sources[0] A : List[str] = sinks[0] # make fake vertex if there are more # than one source or sink if len(__lowerCamelCase ) > 1 or len(__lowerCamelCase ) > 1: A : Optional[Any] = 0 for i in sources: max_input_flow += sum(self.graph[i] ) A : Optional[int] = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: A : str = max_input_flow A : List[str] = 0 A : Any = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: A : List[str] = max_input_flow A : str = size - 1 def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[Any]: if self.maximum_flow_algorithm is None: raise Exception("You need to set maximum flow algorithm before." ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : List[Any] ) -> Union[str, Any]: A : Any = algorithm(self ) class lowerCamelCase_ : '''simple docstring''' def __init__( self : Optional[int] , __lowerCamelCase : int ) -> List[str]: A : Optional[int] = flow_network A : List[Any] = flow_network.verticesCount A : Tuple = flow_network.sourceIndex A : Any = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that A : int = flow_network.graph A : int = False def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Tuple: if not self.executed: self._algorithm() A : Optional[Any] = True def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]: pass class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : int , __lowerCamelCase : Tuple ) -> Dict: super().__init__(__lowerCamelCase ) # use this to save your result A : List[Any] = -1 def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> int: if not self.executed: raise Exception("You should execute algorithm before using its result!" 
) return self.maximum_flow class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : Dict , __lowerCamelCase : Any ) -> Tuple: super().__init__(__lowerCamelCase ) A : Optional[int] = [[0] * self.verticies_count for i in range(self.verticies_count )] A : Optional[int] = [0] * self.verticies_count A : Dict = [0] * self.verticies_count def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]: A : Tuple = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule A : List[Any] = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list A : List[Any] = 0 while i < len(__lowerCamelCase ): A : int = vertices_list[i] A : Optional[int] = self.heights[vertex_index] self.process_vertex(__lowerCamelCase ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(__lowerCamelCase ) ) A : Any = 0 else: i += 1 A : List[Any] = sum(self.preflow[self.source_index] ) def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : str ) -> List[str]: while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(__lowerCamelCase , __lowerCamelCase ) self.relabel(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple ) -> int: A : List[str] = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Dict ) -> int: A : int = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): A : Dict = self.heights[to_index] if min_height is not None: A : List[str] = min_height + 1 if __name__ == "__main__": __SCREAMING_SNAKE_CASE = [0] __SCREAMING_SNAKE_CASE = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] __SCREAMING_SNAKE_CASE = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network __SCREAMING_SNAKE_CASE = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate __SCREAMING_SNAKE_CASE = flow_network.find_maximum_flow() print(F"""maximum flow is {maximum_flow}""")
17
from __future__ import annotations

from math import pi

# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
17
1
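For reference, the three branches of the Casimir function above are algebraic rearrangements of one standard result for two parallel plates of area A at separation d (background physics, not stated in the sample itself):

F = \frac{\pi^2 \hbar c A}{240\, d^4}, \qquad
A = \frac{240\, F d^4}{\pi^2 \hbar c}, \qquad
d = \left(\frac{\pi^2 \hbar c A}{240\, F}\right)^{1/4}

which is exactly what the force == 0, area == 0, and distance == 0 cases compute.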
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
17
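One subtlety in the Floyd-Warshall sample above: the constructor never seeds the diagonal, so after floyd_warshall() a query show_min(i, i) returns math.inf (or the length of the shortest cycle through i) rather than the conventional 0. A hedged fix sketch, assuming it sits next to the Graph class above; the subclass name is ours:

class GraphWithZeroDiagonal(Graph):
    def __init__(self, n=0):
        super().__init__(n)
        # conventional all-pairs seeding: a node is at distance 0 from itself
        for i in range(0, n):
            self.dp[i][i] = 0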
import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : Optional[Any] = RobertaPreLayerNormConfig.from_pretrained( _lowerCamelCase , architectures=["RobertaPreLayerNormForMaskedLM"] ) # convert state_dict A : List[Any] = torch.load(hf_hub_download(repo_id=_lowerCamelCase , filename="pytorch_model.bin" ) ) A : Union[str, Any] = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta' if tensor_key.startswith("roberta." ): A : int = "roberta_prelayernorm." + tensor_key[len("roberta." ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ): continue A : Any = tensor_value A : Optional[int] = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=_lowerCamelCase , config=_lowerCamelCase , state_dict=_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) # convert tokenizer A : Optional[Any] = AutoTokenizer.from_pretrained(_lowerCamelCase ) tokenizer.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint-repo""", default=None, type=str, required=True, help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) __SCREAMING_SNAKE_CASE = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
17
1
import importlib import torch import yaml from omegaconf import OmegaConf from taming.models.vqgan import VQModel def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase=False ): A : Union[str, Any] = OmegaConf.load(_lowerCamelCase ) if display: print(yaml.dump(OmegaConf.to_container(_lowerCamelCase ) ) ) return config def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ): if conf_path is None: A : List[Any] = "./model_checkpoints/vqgan_only.yaml" A : Dict = load_config(_lowerCamelCase , display=_lowerCamelCase ) A : List[str] = VQModel(**config.model.params ) if ckpt_path is None: A : List[Any] = "./model_checkpoints/vqgan_only.pt" A : Dict = torch.load(_lowerCamelCase , map_location=_lowerCamelCase ) if ".ckpt" in ckpt_path: A : List[str] = sd["state_dict"] model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase ) model.to(_lowerCamelCase ) del sd return model def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A , A , A : List[str] = model.encode(_lowerCamelCase ) print(f"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" ) A : List[Any] = model.decode(_lowerCamelCase ) return xrec def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase=False ): A , A : str = string.rsplit("." , 1 ) if reload: A : Union[str, Any] = importlib.import_module(_lowerCamelCase ) importlib.reload(_lowerCamelCase ) return getattr(importlib.import_module(_lowerCamelCase , package=_lowerCamelCase ) , cls ) def UpperCAmelCase ( _lowerCamelCase ): if "target" not in config: raise KeyError("Expected key `target` to instantiate." ) return get_obj_from_str(config["target"] )(**config.get("params" , {} ) ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True , _lowerCamelCase=True ): A : Any = instantiate_from_config(_lowerCamelCase ) if sd is not None: model.load_state_dict(_lowerCamelCase ) if gpu: model.cuda() if eval_mode: model.eval() return {"model": model} def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): # load the specified checkpoint if ckpt: A : List[str] = torch.load(_lowerCamelCase , map_location="cpu" ) A : str = pl_sd["global_step"] print(f"""loaded model from global step {global_step}.""" ) else: A : int = {"state_dict": None} A : List[str] = None A : Optional[Any] = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=_lowerCamelCase , eval_mode=_lowerCamelCase )["model"] return model, global_step
17
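The get_obj_from_str helper in the VQGAN sample above is a common dynamic-import idiom: split "pkg.mod.Class" on the last dot, import the module, then getattr the attribute. A standalone illustration using only the stdlib (the example target is ours, not the sample's):

import importlib


def get_obj_from_str(path: str, reload: bool = False):
    # "collections.OrderedDict" -> module "collections", attribute "OrderedDict"
    module, cls = path.rsplit(".", 1)
    if reload:
        importlib.reload(importlib.import_module(module))
    return getattr(importlib.import_module(module), cls)


assert get_obj_from_str("collections.OrderedDict") is __import__("collections").OrderedDict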
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
17
1
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
17
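A quick sanity check for the recursive bubble sort above (our own example calls, not part of the dataset row):

data = [5, 2, 9, 1, 5, 6]
assert bubble_sort(list(data)) == sorted(data)
assert bubble_sort([]) == []    # length 0 -> range(-1) is empty, no recursion
assert bubble_sort([1]) == [1]  # a single element is already sorted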
import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Optional[int]: self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) ) for a, b in zip(__lowerCamelCase , __lowerCamelCase ): self.assertAlmostEqual(__lowerCamelCase , __lowerCamelCase , delta=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]: A : List[Any] = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(__lowerCamelCase ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: A : Union[str, Any] = None ops.enable_eager_execution_internal() A : Tuple = tf.config.list_physical_devices("CPU" ) if len(__lowerCamelCase ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) A : Dict = tf.config.list_logical_devices(device_type="CPU" ) A : List[str] = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): A : Optional[int] = GradientAccumulator() A : Tuple = tf.Variable([4.0, 3.0] ) A , A : List[Any] = create_optimizer(5e-5 , 10 , 5 ) A : List[str] = tf.Variable([0.0, 0.0] , trainable=__lowerCamelCase ) def accumulate_on_replica(__lowerCamelCase : Tuple ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(__lowerCamelCase : Any , __lowerCamelCase : Optional[int] ): with strategy.scope(): A : int = strategy.experimental_local_results(__lowerCamelCase ) local_variables[0].assign(__lowerCamelCase ) local_variables[1].assign(__lowerCamelCase ) strategy.run(__lowerCamelCase , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(__lowerCamelCase ) def _check_local_values(__lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ): A : Optional[int] = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , __lowerCamelCase , tol=1e-2 ) self.assertListAlmostEqual(values[1].value() , __lowerCamelCase , tol=1e-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
17
1
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
17
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
17
1
import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
17
import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging __SCREAMING_SNAKE_CASE = ( """https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py""" ) __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name def UpperCAmelCase ( ): A : Union[str, Any] = "https://pypi.org/pypi/diffusers/json" A : List[Any] = json.loads(request.urlopen(_lowerCamelCase ).read() )["releases"].keys() return sorted(_lowerCamelCase , key=lambda _lowerCamelCase : version.Version(_lowerCamelCase ) ) def UpperCAmelCase ( ): # This function has already been executed if HF_MODULES_CACHE already is in the Python path. if HF_MODULES_CACHE in sys.path: return sys.path.append(_lowerCamelCase ) os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) A : List[Any] = Path(_lowerCamelCase ) / "__init__.py" if not init_path.exists(): init_path.touch() def UpperCAmelCase ( _lowerCamelCase ): init_hf_modules() A : Tuple = Path(_lowerCamelCase ) / name # If the parent module does not exist yet, recursively create it. if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) A : Optional[int] = dynamic_module_path / "__init__.py" if not init_path.exists(): init_path.touch() def UpperCAmelCase ( _lowerCamelCase ): with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f: A : Union[str, Any] = f.read() # Imports of the form `import .xxx` A : Union[str, Any] = re.findall("^\s*import\s+\.(\S+)\s*$" , _lowerCamelCase , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import" , _lowerCamelCase , flags=re.MULTILINE ) # Unique-ify return list(set(_lowerCamelCase ) ) def UpperCAmelCase ( _lowerCamelCase ): A : Optional[int] = False A : Tuple = [module_file] A : Optional[int] = [] # Let's recurse through all relative imports while not no_change: A : Optional[Any] = [] for f in files_to_check: new_imports.extend(get_relative_imports(_lowerCamelCase ) ) A : Optional[Any] = Path(_lowerCamelCase ).parent A : List[str] = [str(module_path / m ) for m in new_imports] A : Optional[Any] = [f for f in new_import_files if f not in all_relative_imports] A : Union[str, Any] = [f"""{f}.py""" for f in new_import_files] A : Tuple = len(_lowerCamelCase ) == 0 all_relative_imports.extend(_lowerCamelCase ) return all_relative_imports def UpperCAmelCase ( _lowerCamelCase ): with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f: A : Dict = f.read() # Imports of the form `import xxx` A : List[str] = re.findall("^\s*import\s+(\S+)\s*$" , _lowerCamelCase , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall("^\s*from\s+(\S+)\s+import" , _lowerCamelCase , flags=re.MULTILINE ) # Only keep the top-level module A : Optional[int] = [imp.split("." )[0] for imp in imports if not imp.startswith("." 
)] # Unique-ify and test we got them all A : Any = list(set(_lowerCamelCase ) ) A : Tuple = [] for imp in imports: try: importlib.import_module(_lowerCamelCase ) except ImportError: missing_packages.append(_lowerCamelCase ) if len(_lowerCamelCase ) > 0: raise ImportError( "This modeling file requires the following packages that were not found in your environment: " f"""{", ".join(_lowerCamelCase )}. Run `pip install {" ".join(_lowerCamelCase )}`""" ) return get_relative_imports(_lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : int = module_path.replace(os.path.sep , "." ) A : Optional[Any] = importlib.import_module(_lowerCamelCase ) if class_name is None: return find_pipeline_class(_lowerCamelCase ) return getattr(_lowerCamelCase , _lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase ): from ..pipelines import DiffusionPipeline A : int = dict(inspect.getmembers(_lowerCamelCase , inspect.isclass ) ) A : Union[str, Any] = None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , _lowerCamelCase ) and cls.__module__.split("." )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:""" f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in""" f""" {loaded_module}.""" ) A : Any = cls return pipeline_class def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , ): A : List[Any] = str(_lowerCamelCase ) A : Any = os.path.join(_lowerCamelCase , _lowerCamelCase ) if os.path.isfile(_lowerCamelCase ): A : Union[str, Any] = module_file_or_url A : Any = "local" elif pretrained_model_name_or_path.count("/" ) == 0: A : Optional[Any] = get_diffusers_versions() # cut ".dev0" A : Union[str, Any] = "v" + ".".join(__version__.split("." )[:3] ) # retrieve github version that matches if revision is None: A : List[Any] = latest_version if latest_version[1:] in available_versions else "main" logger.info(f"""Defaulting to latest_version: {revision}.""" ) elif revision in available_versions: A : Optional[Any] = f"""v{revision}""" elif revision == "main": A : Dict = revision else: raise ValueError( f"""`custom_revision`: {revision} does not exist. 
Please make sure to choose one of""" f""" {", ".join(available_versions + ["main"] )}.""" ) # community pipeline on GitHub A : Dict = COMMUNITY_PIPELINES_URL.format(revision=_lowerCamelCase , pipeline=_lowerCamelCase ) try: A : Optional[int] = cached_download( _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , ) A : Optional[Any] = "git" A : Any = pretrained_model_name_or_path + ".py" except EnvironmentError: logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" ) raise else: try: # Load from URL or cache if already cached A : Any = hf_hub_download( _lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , ) A : Optional[Any] = os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) ) except EnvironmentError: logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" ) raise # Check we have all the requirements in our environment A : List[str] = check_imports(_lowerCamelCase ) # Now we move the module inside our cached dynamic modules. A : List[str] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(_lowerCamelCase ) A : Optional[int] = Path(_lowerCamelCase ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(_lowerCamelCase , submodule_path / module_file ) for module_needed in modules_needed: A : int = f"""{module_needed}.py""" shutil.copy(os.path.join(_lowerCamelCase , _lowerCamelCase ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(_lowerCamelCase , _lowerCamelCase ): A : Optional[Any] = use_auth_token elif use_auth_token is True: A : Dict = HfFolder.get_token() else: A : Tuple = None A : List[str] = model_info(_lowerCamelCase , revision=_lowerCamelCase , token=_lowerCamelCase ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. 
A : str = submodule_path / commit_hash A : List[str] = full_submodule + os.path.sep + commit_hash create_dynamic_module(_lowerCamelCase ) if not (submodule_path / module_file).exists(): shutil.copy(_lowerCamelCase , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( _lowerCamelCase , f"""{module_needed}.py""" , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , ) return os.path.join(_lowerCamelCase , _lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , **_lowerCamelCase , ): A : int = get_cached_module_file( _lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , ) return get_class_in_module(_lowerCamelCase , final_module.replace(".py" , "" ) )
17
1
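The import-scanning regexes in the dynamic-module sample above are easy to misread, so here is a tiny standalone demonstration of what each one matches (the toy input is ours):

import re

src = "import .helpers\nfrom .utils import thing\nimport torch\nfrom numpy import array\n"

# relative imports: `import .x` and `from .x import y`
rel = re.findall(r"^\s*import\s+\.(\S+)\s*$", src, flags=re.MULTILINE)
rel += re.findall(r"^\s*from\s+\.(\S+)\s+import", src, flags=re.MULTILINE)
assert rel == ["helpers", "utils"]

# external imports: `import x` and `from x import y`, keeping only top-level names
ext = re.findall(r"^\s*import\s+(\S+)\s*$", src, flags=re.MULTILINE)
ext += re.findall(r"^\s*from\s+(\S+)\s+import", src, flags=re.MULTILINE)
ext = [imp.split(".")[0] for imp in ext if not imp.startswith(".")]
assert ext == ["torch", "numpy"]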
import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = (DDPMParallelScheduler,) def SCREAMING_SNAKE_CASE__ ( self : List[str] , **__lowerCamelCase : List[Any] ) -> Optional[Any]: A : int = { "num_train_timesteps": 10_00, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "variance_type": "fixed_small", "clip_sample": True, } config.update(**__lowerCamelCase ) return config def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> str: for timesteps in [1, 5, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]: for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=__lowerCamelCase , beta_end=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Union[str, Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]: for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: for clip_sample in [True, False]: self.check_over_configs(clip_sample=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[str]: self.check_over_configs(thresholding=__lowerCamelCase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=__lowerCamelCase , prediction_type=__lowerCamelCase , sample_max_value=__lowerCamelCase , ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]: for t in [0, 5_00, 9_99]: self.check_over_forward(time_step=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> int: A : int = self.scheduler_classes[0] A : str = self.get_scheduler_config() A : int = scheduler_class(**__lowerCamelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.00979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.02 ) ) < 1e-5 def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple: A : Optional[Any] = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config() A : Any = scheduler_class(**__lowerCamelCase ) A : Dict = len(__lowerCamelCase ) A : List[Any] = self.dummy_model() A : Optional[int] = self.dummy_sample_deter A : List[Any] = self.dummy_sample_deter + 0.1 A : Dict = self.dummy_sample_deter - 0.1 A : Optional[int] = samplea.shape[0] A : List[str] = torch.stack([samplea, samplea, samplea] , dim=0 ) A : int = torch.arange(__lowerCamelCase )[0:3, None].repeat(1 , __lowerCamelCase ) A : Optional[int] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) A : List[str] = scheduler.batch_step_no_noise(__lowerCamelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) A : Optional[Any] = torch.sum(torch.abs(__lowerCamelCase ) ) A : Optional[int] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 1153.1833 ) < 1e-2 assert abs(result_mean.item() - 0.5005 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]: A : Optional[Any] = 
self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config() A : Tuple = scheduler_class(**__lowerCamelCase ) A : Any = len(__lowerCamelCase ) A : Dict = self.dummy_model() A : Optional[int] = self.dummy_sample_deter A : Dict = torch.manual_seed(0 ) for t in reversed(range(__lowerCamelCase ) ): # 1. predict noise residual A : List[str] = model(__lowerCamelCase , __lowerCamelCase ) # 2. predict previous mean of sample x_t-1 A : Tuple = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ).prev_sample A : Union[str, Any] = pred_prev_sample A : int = torch.sum(torch.abs(__lowerCamelCase ) ) A : Union[str, Any] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 258.9606 ) < 1e-2 assert abs(result_mean.item() - 0.3372 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: A : Tuple = self.scheduler_classes[0] A : Tuple = self.get_scheduler_config(prediction_type="v_prediction" ) A : Optional[int] = scheduler_class(**__lowerCamelCase ) A : Union[str, Any] = len(__lowerCamelCase ) A : str = self.dummy_model() A : Tuple = self.dummy_sample_deter A : int = torch.manual_seed(0 ) for t in reversed(range(__lowerCamelCase ) ): # 1. predict noise residual A : int = model(__lowerCamelCase , __lowerCamelCase ) # 2. predict previous mean of sample x_t-1 A : List[Any] = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ).prev_sample A : List[str] = pred_prev_sample A : Optional[Any] = torch.sum(torch.abs(__lowerCamelCase ) ) A : Optional[int] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 202.0296 ) < 1e-2 assert abs(result_mean.item() - 0.2631 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict: A : Any = self.scheduler_classes[0] A : Optional[int] = self.get_scheduler_config() A : Tuple = scheduler_class(**__lowerCamelCase ) A : Dict = [1_00, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=__lowerCamelCase ) A : Optional[Any] = scheduler.timesteps for i, timestep in enumerate(__lowerCamelCase ): if i == len(__lowerCamelCase ) - 1: A : List[Any] = -1 else: A : Optional[int] = timesteps[i + 1] A : Optional[Any] = scheduler.previous_timestep(__lowerCamelCase ) A : Optional[int] = prev_t.item() self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: A : Union[str, Any] = self.scheduler_classes[0] A : Dict = self.get_scheduler_config() A : str = scheduler_class(**__lowerCamelCase ) A : Dict = [1_00, 87, 50, 51, 0] with self.assertRaises(__lowerCamelCase , msg="`custom_timesteps` must be in descending order." ): scheduler.set_timesteps(timesteps=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: A : List[str] = self.scheduler_classes[0] A : Tuple = self.get_scheduler_config() A : Tuple = scheduler_class(**__lowerCamelCase ) A : Any = [1_00, 87, 50, 1, 0] A : Optional[Any] = len(__lowerCamelCase ) with self.assertRaises(__lowerCamelCase , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." 
): scheduler.set_timesteps(num_inference_steps=__lowerCamelCase , timesteps=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]: A : List[Any] = self.scheduler_classes[0] A : Dict = self.get_scheduler_config() A : int = scheduler_class(**__lowerCamelCase ) A : List[Any] = [scheduler.config.num_train_timesteps] with self.assertRaises( __lowerCamelCase , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ): scheduler.set_timesteps(timesteps=__lowerCamelCase )
17
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed __SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""): from run_translation import main # noqa set_seed(42) __SCREAMING_SNAKE_CASE = """sshleifer/student_marian_en_ro_6_1""" __SCREAMING_SNAKE_CASE = """sshleifer/tiny-mbart""" @require_torch class lowerCamelCase_ ( _A ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : str=None , __lowerCamelCase : List[str]=True , __lowerCamelCase : str=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Optional[int]=True , ) -> Dict: A : str = self.run_trainer( eval_steps=1 , max_len=12 , model_name=__lowerCamelCase , num_train_epochs=1 , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , predict_with_generate=__lowerCamelCase , do_train=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , ) A : Dict = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history if not do_eval: return A : List[Any] = [log for log in logs if "eval_loss" in log.keys()] A : Any = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats A : List[str] = eval_metrics[-1] assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase ) assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: self.run_seqaseq_quick() @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: self.run_seqaseq_quick(distributed=__lowerCamelCase ) @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str: self.run_seqaseq_quick(distributed=__lowerCamelCase ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]: self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple" ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> str: self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple --fp16" ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=__lowerCamelCase ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: self.run_seqaseq_quick( distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , 
predict_with_generate=__lowerCamelCase ) @require_apex @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict: # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" ) @parameterized.expand(["base", "low", "high", "mixed"] ) @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : List[str] ) -> Tuple: # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout A : Dict = { # test with the default log_level - should be info and thus log info once "base": {"extra_args_str": "", "n_matches": 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1}, # test with high log_level and log_level_replica - should be quiet on all processes "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0}, } A : List[str] = experiments[experiment_id] A : Union[str, Any] = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False} A : Union[str, Any] = "Running training" with CaptureStderr() as cl: self.run_seqaseq_quick(**__lowerCamelCase , extra_args_str=data["extra_args_str"] ) A : Dict = len(re.findall(__lowerCamelCase , cl.err ) ) self.assertEqual(__lowerCamelCase , data["n_matches"] ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: A : int = self.run_trainer( eval_steps=2 , max_len=1_28 , model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=10 , distributed=__lowerCamelCase , ) # Check metrics A : str = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history A : Dict = [log for log in logs if "eval_loss" in log.keys()] A : Dict = eval_metrics[0] A : int = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase ) # test if do_predict saves generations and metrics A : Optional[Any] = os.listdir(__lowerCamelCase ) A : Any = {os.path.basename(__lowerCamelCase ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]: from transformers.training_args import OptimizerNames def train_and_return_metrics(__lowerCamelCase : str ) -> Tuple[int, float]: A : Optional[int] = "--skip_memory_metrics 0" A : str = self.run_trainer( max_len=1_28 , 
model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=1 , optim=__lowerCamelCase , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , n_gpus_to_use=1 , ) # Check metrics A : Union[str, Any] = TrainerState.load_from_json(Path(__lowerCamelCase , "trainer_state.json" ) ).log_history A : str = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 ) A : List[Any] = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 ) A : int = logs[0]["train_loss"] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss A , A , A : int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) A , A , A : Optional[int] = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) A : Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb A : Dict = gpu_peak_mem_orig + gpu_alloc_mem_orig A : Dict = gpu_peak_mem_bnb + gpu_alloc_mem_bnb A : int = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings A : Tuple = 1_20 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( __lowerCamelCase , __lowerCamelCase , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got" F""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and""" F""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , ) self.assertGreater( __lowerCamelCase , __lowerCamelCase , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got" F""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and""" F""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , ) self.assertEqual( __lowerCamelCase , __lowerCamelCase , F"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" ) def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : float = 3e-3 , __lowerCamelCase : str = "adafactor" , __lowerCamelCase : bool = False , __lowerCamelCase : str = None , __lowerCamelCase : int = 0 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : int = None , ) -> List[str]: A : Optional[int] = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro" A : Optional[int] = self.get_auto_remove_tmp_dir() A : int = F""" --model_name_or_path {model_name} --train_file {data_dir}/train.json 
--validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(__lowerCamelCase )} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(__lowerCamelCase )} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX """.split() A : Optional[Any] = F""" --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(__lowerCamelCase )} """.split() A : Optional[Any] = "\n --do_predict\n ".split() A : Optional[int] = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F"""--optim {optim}""".split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: A : Dict = get_gpu_count() A : Any = get_torch_dist_unique_port() A : Optional[Any] = F""" -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py """.split() A : Any = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__lowerCamelCase , env=self.get_env() ) else: A : List[Any] = ["run_translation.py"] + args with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ): main() return output_dir
17
1
from queue import PriorityQueue from typing import Any import numpy as np def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ): for nxt, d in graph[v]: if nxt in visited_forward: continue A : List[str] = cst_fwd.get(_lowerCamelCase , np.inf ) A : str = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) A : Union[str, Any] = new_cost_f A : List[Any] = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: A : Optional[Any] = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): A : Any = -1 A : Tuple = set() A : Any = set() A : Union[str, Any] = {source: 0} A : str = {destination: 0} A : int = {source: None} A : List[str] = {destination: None} A : PriorityQueue[Any] = PriorityQueue() A : PriorityQueue[Any] = PriorityQueue() A : Dict = np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): A , A : Optional[Any] = queue_forward.get() visited_forward.add(_lowerCamelCase ) A , A : int = queue_backward.get() visited_backward.add(_lowerCamelCase ) A : Any = pass_and_relaxation( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) A : Union[str, Any] = pass_and_relaxation( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: A : Union[str, Any] = shortest_distance return shortest_path_distance __SCREAMING_SNAKE_CASE = { """B""": [["""C""", 1]], """C""": [["""D""", 1]], """D""": [["""F""", 1]], """E""": [["""B""", 1], ["""G""", 2]], """F""": [], """G""": [["""F""", 1]], } __SCREAMING_SNAKE_CASE = { """B""": [["""E""", 1]], """C""": [["""B""", 1]], """D""": [["""C""", 1]], """F""": [["""D""", 1], ["""G""", 1]], """E""": [[None, np.inf]], """G""": [["""E""", 2]], } if __name__ == "__main__": import doctest doctest.testmod()
17
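Both the entry-point function and the two demo graphs in the bidirectional Dijkstra sample above were obfuscated, so any call site is necessarily a guess; assuming the upstream names bidirectional_dij, graph_fwd, and graph_bwd, a usage sketch:

# hypothetical names -- the sample's own identifiers are obfuscated
# cheapest E -> F route in the forward graph: E -> G (cost 2) -> F (cost 1)
assert bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3
# if the forward and backward searches never meet, the function returns -1,
# the value shortest_path_distance is initialised to and only overwritten on success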
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)

    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
17
1
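Note that max_subsequence_sum above maximises over arbitrary (not necessarily contiguous) subsequences, which is why the recurrence max(ans, ans + num, num) may reuse the best total so far: for [1, -2, 3] it returns 4, picking 1 and 3. If the contiguous-subarray maximum is what you need — Kadane's algorithm — a minimal sketch (our own code, not part of the dataset):

def kadane(nums):
    best = cur = nums[0]
    for num in nums[1:]:
        cur = max(num, cur + num)  # best sum of a subarray ending at num
        best = max(best, cur)      # best sum seen anywhere so far
    return best


assert kadane([1, -2, 3]) == 3
assert kadane([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6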
import pytest __SCREAMING_SNAKE_CASE = """__dummy_dataset1__""" __SCREAMING_SNAKE_CASE = """ import json import os import datasets REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\" URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"} class __DummyDataset1__(datasets.GeneratorBasedBuilder): def _info(self): features = datasets.Features( { \"tokens\": datasets.Sequence(datasets.Value(\"string\")), \"ner_tags\": datasets.Sequence( datasets.features.ClassLabel( names=[ \"O\", \"B-PER\", \"I-PER\", \"B-ORG\", \"I-ORG\", \"B-LOC\", \"I-LOC\", ] ) ), \"langs\": datasets.Sequence(datasets.Value(\"string\")), \"spans\": datasets.Sequence(datasets.Value(\"string\")), } ) return datasets.DatasetInfo(features=features) def _split_generators(self, dl_manager): dl_path = dl_manager.download(URLS) return [ datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}), datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}), ] def _generate_examples(self, filepath): with open(filepath, \"r\", encoding=\"utf-8\") as f: for i, line in enumerate(f): yield i, json.loads(line) """ @pytest.fixture def UpperCAmelCase ( ): return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def UpperCAmelCase ( ): return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): A : int = dataset_loading_script_name A : int = tmp_path / "datasets" / script_name script_dir.mkdir(parents=_lowerCamelCase ) A : Any = script_dir / f"""{script_name}.py""" with open(_lowerCamelCase , "w" ) as f: f.write(_lowerCamelCase ) return str(_lowerCamelCase )
17
from math import sqrt


def solution(limit: int = 1000000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int

    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )

    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
17
1
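The increment in the Project Euler 86 sample above encodes the classic unfolding argument: for a cuboid with sides a <= b <= c = max_cuboid_size, the shortest surface path between opposite corners has length sqrt((a + b)**2 + c**2), so each s = sum_shortest_sides whose hypotenuse with c is an integer contributes one cuboid per split s = a + b with 1 <= a <= b <= c. A small standalone check of that contribution count (our own code):

def count_splits(s, c):
    # valid splits s = a + b need a <= s // 2 (so a <= b) and a >= s - c (so b <= c)
    return min(c, s // 2) - max(1, s - c) + 1


# c = 4, a + b = 3 unfolds to sqrt(3**2 + 4**2) = 5, an integer path;
# the only split with b <= 4 is (a, b) = (1, 2), i.e. one cuboid: (1, 2, 4)
assert count_splits(3, 4) == 1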
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class lowerCamelCase_ : '''simple docstring''' @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str: return self.get_dummy_input() @property def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : int=False , __lowerCamelCase : int=False , __lowerCamelCase : Optional[int]=False , ) -> Dict: A : Optional[Any] = 4 A : List[str] = 32 A : Any = (32, 32) A : str = torch.manual_seed(0 ) A : int = torch.device(__lowerCamelCase ) A : List[str] = (batch_size, num_channels) + sizes A : Dict = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase ) A : int = {"hidden_states": hidden_states} if include_temb: A : Any = 1_28 A : List[str] = randn_tensor((batch_size, temb_channels) , generator=__lowerCamelCase , device=__lowerCamelCase ) if include_res_hidden_states_tuple: A : str = torch.manual_seed(1 ) A : Tuple = (randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase ),) if include_encoder_hidden_states: A : Dict = floats_tensor((batch_size, 32, 32) ).to(__lowerCamelCase ) if include_skip_sample: A : Optional[int] = randn_tensor(((batch_size, 3) + sizes) , generator=__lowerCamelCase , device=__lowerCamelCase ) return dummy_input def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]: A : Dict = { "in_channels": 32, "out_channels": 32, "temb_channels": 1_28, } if self.block_type == "up": A : Dict = 32 if self.block_type == "mid": init_dict.pop("out_channels" ) A : str = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Optional[int] ) -> Union[str, Any]: A , A : str = self.prepare_init_args_and_inputs_for_common() A : List[Any] = self.block_class(**__lowerCamelCase ) unet_block.to(__lowerCamelCase ) unet_block.eval() with torch.no_grad(): A : int = unet_block(**__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): A : Union[str, Any] = output[0] self.assertEqual(output.shape , self.output_shape ) A : Any = output[0, -1, -3:, -3:] A : Union[str, Any] = torch.tensor(__lowerCamelCase ).to(__lowerCamelCase ) assert torch_all_close(output_slice.flatten() , __lowerCamelCase , atol=5e-3 ) @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: A , A : Tuple = self.prepare_init_args_and_inputs_for_common() A : str = self.block_class(**__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : Optional[int] = model(**__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): A : Optional[Any] = output[0] A : List[str] = torch.device(__lowerCamelCase ) A : List[str] = randn_tensor(output.shape , device=__lowerCamelCase ) A : Dict = torch.nn.functional.mse_loss(__lowerCamelCase , __lowerCamelCase ) loss.backward()
17
import os


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."


if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
17
1
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = { """SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""", # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = "deformable_detr" a__ = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self : List[str] , __lowerCamelCase : List[str]=True , __lowerCamelCase : int=None , __lowerCamelCase : int=3 , __lowerCamelCase : Union[str, Any]=3_00 , __lowerCamelCase : Optional[int]=10_24 , __lowerCamelCase : List[Any]=6 , __lowerCamelCase : Tuple=10_24 , __lowerCamelCase : Dict=8 , __lowerCamelCase : Optional[Any]=6 , __lowerCamelCase : Union[str, Any]=10_24 , __lowerCamelCase : int=8 , __lowerCamelCase : Optional[int]=0.0 , __lowerCamelCase : Tuple=True , __lowerCamelCase : List[Any]="relu" , __lowerCamelCase : Dict=2_56 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Any=0.0 , __lowerCamelCase : Optional[Any]=0.0 , __lowerCamelCase : Union[str, Any]=0.02 , __lowerCamelCase : Tuple=1.0 , __lowerCamelCase : int=True , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Optional[int]="sine" , __lowerCamelCase : int="resnet50" , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Tuple=4 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : Dict=4 , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Any=3_00 , __lowerCamelCase : str=False , __lowerCamelCase : Dict=1 , __lowerCamelCase : List[Any]=5 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str=1 , __lowerCamelCase : Dict=1 , __lowerCamelCase : Optional[Any]=5 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : str=0.1 , __lowerCamelCase : Union[str, Any]=0.25 , __lowerCamelCase : List[Any]=False , **__lowerCamelCase : Union[str, Any] , ) -> Dict: if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." 
) A : Dict = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(__lowerCamelCase , __lowerCamelCase ): A : int = backbone_config.get("model_type" ) A : Optional[int] = CONFIG_MAPPING[backbone_model_type] A : Dict = config_class.from_dict(__lowerCamelCase ) A : Tuple = use_timm_backbone A : Optional[int] = backbone_config A : str = num_channels A : Tuple = num_queries A : str = max_position_embeddings A : Tuple = d_model A : List[Any] = encoder_ffn_dim A : Optional[int] = encoder_layers A : Optional[Any] = encoder_attention_heads A : List[str] = decoder_ffn_dim A : Optional[int] = decoder_layers A : Union[str, Any] = decoder_attention_heads A : List[str] = dropout A : List[Any] = attention_dropout A : int = activation_dropout A : Dict = activation_function A : Dict = init_std A : int = init_xavier_std A : Dict = encoder_layerdrop A : str = auxiliary_loss A : List[Any] = position_embedding_type A : List[str] = backbone A : Optional[int] = use_pretrained_backbone A : Optional[Any] = dilation # deformable attributes A : Dict = num_feature_levels A : Optional[Any] = encoder_n_points A : List[Any] = decoder_n_points A : Tuple = two_stage A : Optional[int] = two_stage_num_proposals A : str = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError("If two_stage is True, with_box_refine must be True." ) # Hungarian matcher A : str = class_cost A : Union[str, Any] = bbox_cost A : List[Any] = giou_cost # Loss coefficients A : Optional[int] = mask_loss_coefficient A : int = dice_loss_coefficient A : Optional[Any] = bbox_loss_coefficient A : int = giou_loss_coefficient A : str = eos_coefficient A : Dict = focal_alpha A : List[Any] = disable_custom_kernels super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase ) @property def SCREAMING_SNAKE_CASE__ ( self : str ) -> int: return self.encoder_attention_heads @property def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> int: return self.d_model def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str: A : Optional[int] = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: A : Any = self.backbone_config.to_dict() A : Any = self.__class__.model_type return output
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class lowerCamelCase_ : '''simple docstring''' def __init__( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Tuple=13 , __lowerCamelCase : List[str]=30 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : Any=3 , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : int=32 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Any=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=2 , ) -> str: A : List[Any] = parent A : Optional[int] = batch_size A : Any = image_size A : Optional[Any] = patch_size A : Optional[Any] = num_channels A : Tuple = is_training A : Optional[Any] = use_labels A : Union[str, Any] = hidden_size A : Tuple = num_hidden_layers A : Union[str, Any] = num_attention_heads A : Union[str, Any] = intermediate_size A : Any = hidden_act A : Tuple = hidden_dropout_prob A : Dict = attention_probs_dropout_prob A : Any = type_sequence_label_size A : Tuple = initializer_range A : List[Any] = scope A : Optional[int] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) A : List[str] = (image_size // patch_size) ** 2 A : List[str] = num_patches + 2 def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int: A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : List[Any] = None if self.use_labels: A : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Dict = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str] ) -> int: A : Optional[int]
= DeiTModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : List[Any] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Any ) -> Any: A : List[Any] = DeiTForMaskedImageModeling(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A : List[str] = 1 A : Optional[int] = DeiTForMaskedImageModeling(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : int ) -> Dict: A : str = self.type_sequence_label_size A : List[str] = DeiTForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A : Any = 1 A : str = DeiTForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: A : Dict = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ) : Tuple = config_and_inputs A : int = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) a__ = ( { "feature-extraction": DeiTModel, "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]: A : str = DeiTModelTester(self ) A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict: self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int: pass def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: A , A : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Dict = model_class(__lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]: A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Union[str, Any] 
= model_class(__lowerCamelCase ) A : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Any = [*signature.parameters.keys()] A : Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any: A : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any]=False ) -> str: A : Union[str, Any] = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: if not self.model_tester.is_training: return A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() A : Dict = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(__lowerCamelCase ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue A : Union[str, Any] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Dict = model(**__lowerCamelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return A : Tuple = False A : Any = True for model_class in self.all_model_classes: if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue A : List[str] = model_class(__lowerCamelCase ) model.gradient_checkpointing_enable() model.to(__lowerCamelCase ) model.train() A : Dict = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Tuple = model(**__lowerCamelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() A : int = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(__lowerCamelCase ), *get_values(__lowerCamelCase ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ): A : Tuple = problem_type["title"] A : Optional[Any] = 
problem_type["num_labels"] A : List[str] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if problem_type["num_labels"] > 1: A : List[str] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) A : int = inputs["labels"].to(problem_type["dtype"] ) # This tests that we do not trigger the warning from PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom that something is wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=__lowerCamelCase ) as warning_list: A : Optional[Any] = model(**__lowerCamelCase ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Optional[Any] = DeiTModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: A : Optional[int] = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to( __lowerCamelCase ) A : List[Any] = self.default_image_processor A : List[Any] = prepare_img() A : Optional[Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): A : List[str] = model(**__lowerCamelCase ) # verify the logits A : str = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : List[str] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: A : str = DeiTModel.from_pretrained( "facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" ) A : Dict = self.default_image_processor A : Optional[int] = prepare_img() A : Union[str, Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ) A : Union[str, Any] = inputs.pixel_values.to(__lowerCamelCase ) # forward pass to make sure inference works in fp16 with torch.no_grad(): A : List[str] = model(__lowerCamelCase )
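# A minimal inference sketch mirroring the integration test above, assuming network
# access to the same public checkpoint; it is illustrative, not part of the test file.
import torch
from PIL import Image
from transformers import DeiTForImageClassificationWithTeacher, DeiTImageProcessor

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits  # shape (1, 1000)
print(model.config.id2label[logits.argmax(-1).item()])  # predicted ImageNet class name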
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets __SCREAMING_SNAKE_CASE = """\ @inproceedings{pillutla-etal:mauve:neurips2021, title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers}, author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid}, booktitle = {NeurIPS}, year = {2021} } """ __SCREAMING_SNAKE_CASE = """\ MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure. MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences. For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021). This metric is a wrapper around the official implementation of MAUVE: https://github.com/krishnap25/mauve """ __SCREAMING_SNAKE_CASE = """ Calculates MAUVE scores between two lists of generated text and reference text. Args: predictions: list of generated text to score. Each prediction should be a string with tokens separated by spaces. references: list of references, one for each prediction. Each reference should be a string with tokens separated by spaces. Optional Args: num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1 kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9 kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5 kmeans_max_iter: maximum number of k-means iterations. Default 500 featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl']. device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU max_text_length: maximum number of tokens to consider. Default 1024 divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25 mauve_scaling_factor: \"c\" from the paper. Default 5. verbose: If True (default), print running time updates seed: random seed to initialize k-means cluster assignments. Returns: mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer, frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer, divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve, p_hist: a discrete distribution, which is a quantized version of the text distribution p_text, q_hist: same as above, but with q_text.
Examples: >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest >>> import datasets >>> mauve = datasets.load_metric('mauve') >>> predictions = [\"hello there\", \"general kenobi\"] >>> references = [\"hello there\", \"general kenobi\"] >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP >>> print(out.mauve) # doctest: +SKIP 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[ "https://arxiv.org/abs/2102.01454", "https://github.com/krishnap25/mauve", ] , ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : int=None , __lowerCamelCase : Union[str, Any]="auto" , __lowerCamelCase : List[Any]=-1 , __lowerCamelCase : Dict=0.9 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : List[Any]=5_00 , __lowerCamelCase : str="gpt2-large" , __lowerCamelCase : Optional[Any]=-1 , __lowerCamelCase : Optional[int]=10_24 , __lowerCamelCase : Optional[Any]=25 , __lowerCamelCase : Dict=5 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Dict=25 , ) -> List[Any]: A : List[Any] = compute_mauve( p_text=__lowerCamelCase , q_text=__lowerCamelCase , p_features=__lowerCamelCase , q_features=__lowerCamelCase , p_tokens=__lowerCamelCase , q_tokens=__lowerCamelCase , num_buckets=__lowerCamelCase , pca_max_data=__lowerCamelCase , kmeans_explained_var=__lowerCamelCase , kmeans_num_redo=__lowerCamelCase , kmeans_max_iter=__lowerCamelCase , featurize_model_name=__lowerCamelCase , device_id=__lowerCamelCase , max_text_length=__lowerCamelCase , divergence_curve_discretization_size=__lowerCamelCase , mauve_scaling_factor=__lowerCamelCase , verbose=__lowerCamelCase , seed=__lowerCamelCase , ) return out
from sklearn.metrics import recall_score import datasets __SCREAMING_SNAKE_CASE = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ __SCREAMING_SNAKE_CASE = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`. - **zero_division** (`string` or `int`): Sets the value to return when there is a zero division. Defaults to `'warn'`. - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ __SCREAMING_SNAKE_CASE = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("int32" ) ), "references": datasets.Sequence(datasets.Value("int32" ) ), } if self.config_name == "multilabel" else { "predictions": datasets.Value("int32" ), "references": datasets.Value("int32" ), } ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , ) def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Tuple="binary" , __lowerCamelCase : Tuple=None , __lowerCamelCase : Tuple="warn" , ) -> Optional[Any]: A : str = recall_score( __lowerCamelCase , __lowerCamelCase , labels=__lowerCamelCase , pos_label=__lowerCamelCase , average=__lowerCamelCase , sample_weight=__lowerCamelCase , zero_division=__lowerCamelCase , ) return {"recall": float(__lowerCamelCase ) if score.size == 1 else score}
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __SCREAMING_SNAKE_CASE = { """configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""], """tokenization_roberta""": ["""RobertaTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = ["""RobertaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """RobertaForCausalLM""", """RobertaForMaskedLM""", """RobertaForMultipleChoice""", """RobertaForQuestionAnswering""", """RobertaForSequenceClassification""", """RobertaForTokenClassification""", """RobertaModel""", """RobertaPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFRobertaForCausalLM""", """TFRobertaForMaskedLM""", """TFRobertaForMultipleChoice""", """TFRobertaForQuestionAnswering""", """TFRobertaForSequenceClassification""", """TFRobertaForTokenClassification""", """TFRobertaMainLayer""", """TFRobertaModel""", """TFRobertaPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """FlaxRobertaForCausalLM""", """FlaxRobertaForMaskedLM""", """FlaxRobertaForMultipleChoice""", """FlaxRobertaForQuestionAnswering""", """FlaxRobertaForSequenceClassification""", """FlaxRobertaForTokenClassification""", """FlaxRobertaModel""", """FlaxRobertaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE = 
_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
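# A small sketch of what the _LazyModule wiring above provides: importing the package
# is cheap, and each submodule is loaded only on first attribute access.
import transformers.models.roberta as roberta

tokenizer_cls = roberta.RobertaTokenizer  # tokenization_roberta is imported lazily here
print(tokenizer_cls.__name__)
# modeling_roberta (and the TF/Flax variants) stay unimported until something touches them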
from collections import deque from .hash_table import HashTable class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : Union[str, Any] , *__lowerCamelCase : Dict , **__lowerCamelCase : int ) -> Optional[int]: super().__init__(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Union[str, Any]: A : Optional[Any] = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(__lowerCamelCase ) A : Dict = self.values[key] def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: return ( sum(self.charge_factor - len(__lowerCamelCase ) for slot in self.values ) / self.size_table * self.charge_factor ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : List[Any]=None ) -> Optional[int]: if not ( len(self.values[key] ) == self.charge_factor and self.values.count(__lowerCamelCase ) == 0 ): return key return super()._collision_resolution(__lowerCamelCase , __lowerCamelCase )
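# A hedged usage sketch. It assumes the parent class imported from .hash_table follows
# TheAlgorithms' interface, HashTable(size_table, charge_factor=None, lim_charge=None),
# with an insert_data() method that routes through the deque-based _set_value above;
# the subclass name below is likewise assumed, since it is mangled in this listing.
linked_table = HashTableWithLinkedList(3, charge_factor=2)  # name assumed
for value in (10, 13, 16):
    linked_table.insert_data(value)  # values hashed to one key accumulate in a deque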
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = { """microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""", } class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = "git_vision_model" def __init__( self : Optional[int] , __lowerCamelCase : str=7_68 , __lowerCamelCase : Dict=30_72 , __lowerCamelCase : str=12 , __lowerCamelCase : int=12 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Any=2_24 , __lowerCamelCase : Any=16 , __lowerCamelCase : Optional[Any]="quick_gelu" , __lowerCamelCase : int=1e-5 , __lowerCamelCase : Tuple=0.0 , __lowerCamelCase : List[Any]=0.02 , **__lowerCamelCase : List[str] , ) -> Optional[Any]: super().__init__(**__lowerCamelCase ) A : Tuple = hidden_size A : List[str] = intermediate_size A : Tuple = num_hidden_layers A : List[str] = num_attention_heads A : Optional[Any] = num_channels A : Tuple = patch_size A : Dict = image_size A : int = initializer_range A : List[Any] = attention_dropout A : Optional[Any] = layer_norm_eps A : List[Any] = hidden_act @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Optional[Any] , __lowerCamelCase : Union[str, os.PathLike] , **__lowerCamelCase : Union[str, Any] ) -> "PretrainedConfig": cls._set_token_in_kwargs(__lowerCamelCase ) A , A : List[Any] = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase ) # get the vision config dict if we are loading from GITConfig if config_dict.get("model_type" ) == "git": A : List[str] = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__lowerCamelCase , **__lowerCamelCase ) class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = "git" def __init__( self : int , __lowerCamelCase : Tuple=None , __lowerCamelCase : Any=3_05_22 , __lowerCamelCase : Tuple=7_68 , __lowerCamelCase : Tuple=6 , __lowerCamelCase : Optional[Any]=12 , __lowerCamelCase : List[Any]=30_72 , __lowerCamelCase : Optional[int]="gelu" , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Optional[Any]=10_24 , __lowerCamelCase : Union[str, Any]=0.02 , __lowerCamelCase : List[str]=1e-12 , __lowerCamelCase : Any=0 , __lowerCamelCase : Any="absolute" , __lowerCamelCase : List[str]=True , __lowerCamelCase : Any=False , __lowerCamelCase : Any=1_01 , __lowerCamelCase : Union[str, Any]=1_02 , __lowerCamelCase : Tuple=None , **__lowerCamelCase : Union[str, Any] , ) -> List[Any]: super().__init__(bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , pad_token_id=__lowerCamelCase , **__lowerCamelCase ) if vision_config is None: A : Tuple = {} logger.info("vision_config is None. initializing the GitVisionConfig with default values." 
) A : List[Any] = GitVisionConfig(**__lowerCamelCase ) A : List[Any] = vocab_size A : str = hidden_size A : Union[str, Any] = num_hidden_layers A : List[Any] = num_attention_heads A : Any = hidden_act A : Any = intermediate_size A : str = hidden_dropout_prob A : Optional[Any] = attention_probs_dropout_prob A : Any = max_position_embeddings A : List[str] = initializer_range A : int = layer_norm_eps A : List[Any] = position_embedding_type A : str = use_cache A : Any = tie_word_embeddings A : Union[str, Any] = num_image_with_embedding A : List[str] = bos_token_id A : Dict = eos_token_id def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str: A : Any = copy.deepcopy(self.__dict__ ) A : Dict = self.vision_config.to_dict() A : Dict = self.__class__.model_type return output
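# A hedged usage sketch via the upstream classes this listing corresponds to
# (transformers' GitVisionConfig and GitConfig), since the two mangled class names
# above shadow each other and are not importable as written.
from transformers import GitConfig, GitVisionConfig

vision = GitVisionConfig(image_size=224, patch_size=16)
git_config = GitConfig(vision_config=vision.to_dict(), num_hidden_layers=6)
print(git_config.to_dict()["vision_config"]["patch_size"])  # 16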
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class lowerCamelCase_ : '''simple docstring''' @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str: return self.get_dummy_input() @property def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : int=False , __lowerCamelCase : int=False , __lowerCamelCase : Optional[int]=False , ) -> Dict: A : Optional[Any] = 4 A : List[str] = 32 A : Any = (32, 32) A : str = torch.manual_seed(0 ) A : int = torch.device(__lowerCamelCase ) A : List[str] = (batch_size, num_channels) + sizes A : Dict = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase ) A : int = {"hidden_states": hidden_states} if include_temb: A : Any = 1_28 A : List[str] = randn_tensor((batch_size, temb_channels) , generator=__lowerCamelCase , device=__lowerCamelCase ) if include_res_hidden_states_tuple: A : str = torch.manual_seed(1 ) A : Tuple = (randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase ),) if include_encoder_hidden_states: A : Dict = floats_tensor((batch_size, 32, 32) ).to(__lowerCamelCase ) if include_skip_sample: A : Optional[int] = randn_tensor(((batch_size, 3) + sizes) , generator=__lowerCamelCase , device=__lowerCamelCase ) return dummy_input def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]: A : Dict = { "in_channels": 32, "out_channels": 32, "temb_channels": 1_28, } if self.block_type == "up": A : Dict = 32 if self.block_type == "mid": init_dict.pop("out_channels" ) A : str = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Optional[int] ) -> Union[str, Any]: A , A : str = self.prepare_init_args_and_inputs_for_common() A : List[Any] = self.block_class(**__lowerCamelCase ) unet_block.to(__lowerCamelCase ) unet_block.eval() with torch.no_grad(): A : int = unet_block(**__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): A : Union[str, Any] = output[0] self.assertEqual(output.shape , self.output_shape ) A : Any = output[0, -1, -3:, -3:] A : Union[str, Any] = torch.tensor(__lowerCamelCase ).to(__lowerCamelCase ) assert torch_all_close(output_slice.flatten() , __lowerCamelCase , atol=5e-3 ) @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: A , A : Tuple = self.prepare_init_args_and_inputs_for_common() A : str = self.block_class(**__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : Optional[int] = model(**__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): A : Optional[Any] = output[0] A : List[str] = torch.device(__lowerCamelCase ) A : List[str] = randn_tensor(output.shape , device=__lowerCamelCase ) A : Dict = torch.nn.functional.mse_loss(__lowerCamelCase , __lowerCamelCase ) loss.backward()
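# A hedged sketch of how a block-tester mixin like the one above is consumed,
# patterned on diffusers' own block tests: a concrete case pins block_class and
# block_type and compares a slice of the block's output against values measured
# once. UNetBlockTesterMixin stands in for the mangled mixin name above, and the
# zeroed slice is a placeholder, not a real regression value.
import unittest
from diffusers.models.unet_2d_blocks import DownBlock2D

class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):  # mixin name assumed
    block_class = DownBlock2D
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0] * 9  # real tests pin nine values measured from the block
        super().test_output(expected_slice)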
import math def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase = 0 , _lowerCamelCase = 0 ): A : Tuple = end or len(_lowerCamelCase ) for i in range(_lowerCamelCase , _lowerCamelCase ): A : Optional[Any] = i A : List[Any] = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: A : List[Any] = array[temp_index - 1] temp_index -= 1 A : Union[str, Any] = temp_index_value return array def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): # Max Heap A : Optional[int] = index A : Optional[int] = 2 * index + 1 # Left Node A : List[Any] = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: A : int = left_index if right_index < heap_size and array[largest] < array[right_index]: A : Tuple = right_index if largest != index: A , A : str = array[largest], array[index] heapify(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase ): A : int = len(_lowerCamelCase ) for i in range(n // 2 , -1 , -1 ): heapify(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) for i in range(n - 1 , 0 , -1 ): A , A : Optional[Any] = array[0], array[i] heapify(_lowerCamelCase , 0 , _lowerCamelCase ) return array def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): A : Optional[int] = low A : Tuple = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i A , A : Any = array[j], array[i] i += 1 def UpperCAmelCase ( _lowerCamelCase ): if len(_lowerCamelCase ) == 0: return array A : int = 2 * math.ceil(math.loga(len(_lowerCamelCase ) ) ) A : Any = 16 return intro_sort(_lowerCamelCase , 0 , len(_lowerCamelCase ) , _lowerCamelCase , _lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): while end - start > size_threshold: if max_depth == 0: return heap_sort(_lowerCamelCase ) max_depth -= 1 A : List[str] = median_of_a(_lowerCamelCase , _lowerCamelCase , start + ((end - start) // 2) + 1 , end - 1 ) A : Union[str, Any] = partition(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) intro_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) A : str = p return insertion_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE = input("""Enter numbers separated by a comma : """).strip() __SCREAMING_SNAKE_CASE = [float(item) for item in user_input.split(""",""")] print(sort(unsorted))
import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs (`Dict[str, Any]`, *optional*): Additional stopping criteria specific kwargs. Return: `bool`. `False` indicates we should continue, `True` indicates we should stop. """ class lowerCamelCase_ ( _A ): '''simple docstring''' @add_start_docstrings(__lowerCamelCase ) def __call__( self : Optional[int] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Optional[Any] ) -> bool: raise NotImplementedError("StoppingCriteria needs to be subclassed" ) class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] = None ) -> List[Any]: A : str = max_length A : Optional[int] = max_position_embeddings @add_start_docstrings(__lowerCamelCase ) def __call__( self : List[str] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Any ) -> bool: A : List[Any] = input_ids.shape[-1] A : Any = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( "This is a friendly reminder - the current text generation call will exceed the model's predefined " F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """ "exceptions, performance degradation, or nothing at all." ) return is_done class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : int ) -> List[Any]: warnings.warn( "The class `MaxNewTokensCriteria` is deprecated. " F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """ "with `max_length = start_length + max_new_tokens` instead." 
, __lowerCamelCase , ) A : str = start_length A : Optional[Any] = max_new_tokens A : Dict = start_length + max_new_tokens @add_start_docstrings(__lowerCamelCase ) def __call__( self : int , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool: return input_ids.shape[-1] >= self.max_length class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : Optional[int] , __lowerCamelCase : float , __lowerCamelCase : Optional[float] = None ) -> List[Any]: A : str = max_time A : Dict = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(__lowerCamelCase ) def __call__( self : Any , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool: return time.time() - self.initial_timestamp > self.max_time class lowerCamelCase_ ( _A ): '''simple docstring''' @add_start_docstrings(__lowerCamelCase ) def __call__( self : Union[str, Any] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : int ) -> bool: return any(criteria(__lowerCamelCase , __lowerCamelCase ) for criteria in self ) @property def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]: for stopping_criterium in self: if isinstance(__lowerCamelCase , __lowerCamelCase ): return stopping_criterium.max_length elif isinstance(__lowerCamelCase , __lowerCamelCase ): return stopping_criterium.max_length return None def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : Optional[int] = stopping_criteria.max_length A : Any = deepcopy(_lowerCamelCase ) if stopping_max_length is not None and stopping_max_length != max_length: warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , _lowerCamelCase ) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=_lowerCamelCase ) ) return new_stopping_criteria
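# A minimal composition sketch using the public transformers API this listing
# corresponds to: the criteria in a StoppingCriteriaList are OR-ed together by
# its __call__, as in the any(...) loop above.
import torch
from transformers import MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList

criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
input_ids = torch.ones((1, 25), dtype=torch.long)  # sequence already 25 tokens long
scores = torch.zeros((1, 50257))
print(criteria(input_ids, scores))  # True: 25 >= max_length of 20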
from sympy import diff, lambdify, symbols from sympy.functions import * # noqa: F403 def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = "x" , _lowerCamelCase = 10**-10 , _lowerCamelCase = 1 , ): A : str = symbols(_lowerCamelCase ) A : int = lambdify(_lowerCamelCase , _lowerCamelCase ) A : List[str] = lambdify(_lowerCamelCase , diff(_lowerCamelCase , _lowerCamelCase ) ) A : Optional[int] = starting_point while True: if diff_function(_lowerCamelCase ) != 0: A : Optional[Any] = prev_guess - multiplicity * func(_lowerCamelCase ) / diff_function( _lowerCamelCase ) else: raise ZeroDivisionError("Could not find root" ) from None # Precision is checked by comparing the difference of consecutive guesses if abs(next_guess - prev_guess ) < precision: return next_guess A : int = next_guess # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""") # Find root of polynomial # Find fourth Root of 5 print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""") # Find value of e print( """The root of log(y) - 1 = 0 is """, F"""{newton_raphson('log(y) - 1', 2, variable='y')}""", ) # Exponential Roots print( """The root of exp(x) - 1 = 0 is""", F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""", ) # Find root of cos(x) print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = ["pixel_values"] def __init__( self : str , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Dict[str, int]] = None , __lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCamelCase : bool = True , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : bool = True , __lowerCamelCase : Union[int, float] = 1 / 2_55 , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , **__lowerCamelCase : Any , ) -> None: super().__init__(**__lowerCamelCase ) A : Any = size if size is not None else {"shortest_edge": 2_56} A : Optional[Any] = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase ) A : Tuple = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24} A : List[str] = get_size_dict(__lowerCamelCase , param_name="crop_size" ) A : Optional[int] = do_resize A : Optional[int] = size A : List[Any] = resample A : Optional[int] = do_center_crop A : List[Any] = crop_size A : Dict = do_rescale A : Any = rescale_factor A : Dict = do_normalize A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : np.ndarray , __lowerCamelCase : Dict[str, int] , __lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Tuple , ) -> np.ndarray: A : int = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) A : Union[str, Any] = get_resize_output_image_size(__lowerCamelCase , size=size["shortest_edge"] , default_to_square=__lowerCamelCase ) return resize(__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : np.ndarray , __lowerCamelCase : Dict[str, int] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Optional[Any] , ) -> np.ndarray: A : str = get_size_dict(__lowerCamelCase ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}""" ) return center_crop(__lowerCamelCase , size=(size["height"], size["width"]) , data_format=__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : np.ndarray , __lowerCamelCase : float , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : List[str] ) -> np.ndarray: return rescale(__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[float, List[float]] , __lowerCamelCase : Union[float, List[float]] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Union[str, Any] , ) -> np.ndarray: return normalize(__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : ImageInput , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : PILImageResampling = None , __lowerCamelCase : bool = None , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[float] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__lowerCamelCase : List[str] , ) -> str: A : int = do_resize if do_resize is not None else self.do_resize A : str = size if size is not None else self.size A : List[str] = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase ) A : Any = resample if resample is not None else self.resample A : int = do_center_crop if do_center_crop is not None else self.do_center_crop A : int = crop_size if crop_size is not None else self.crop_size A : Any = get_size_dict(__lowerCamelCase , param_name="crop_size" ) A : Dict = do_rescale if do_rescale is not None else self.do_rescale A : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor A : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize A : Optional[Any] = image_mean if image_mean is not None else self.image_mean A : Dict = image_std if image_std is not None else self.image_std A : Optional[int] = make_list_of_images(__lowerCamelCase ) if not valid_images(__lowerCamelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. 
A : Union[str, Any] = [to_numpy_array(__lowerCamelCase ) for image in images] if do_resize: A : List[str] = [self.resize(image=__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase ) for image in images] if do_center_crop: A : List[str] = [self.center_crop(image=__lowerCamelCase , size=__lowerCamelCase ) for image in images] if do_rescale: A : Tuple = [self.rescale(image=__lowerCamelCase , scale=__lowerCamelCase ) for image in images] if do_normalize: A : str = [self.normalize(image=__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase ) for image in images] A : Optional[Any] = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) for image in images] A : int = {"pixel_values": images} return BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : List[Tuple] = None ) -> Union[str, Any]: A : Union[str, Any] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(__lowerCamelCase ) != len(__lowerCamelCase ): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) if is_torch_tensor(__lowerCamelCase ): A : List[Any] = target_sizes.numpy() A : Optional[Any] = [] for idx in range(len(__lowerCamelCase ) ): A : Optional[int] = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=__lowerCamelCase ) A : Optional[int] = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(__lowerCamelCase ) else: A : Tuple = logits.argmax(dim=1 ) A : Union[str, Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
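# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the source): the
# image processor above keeps the dump's obfuscated class name, so any public
# alias is an assumption. With the defaults it resizes so the shortest edge is
# 256, center-crops to 224x224, rescales by 1/255, then normalizes with the
# ImageNet mean/std. Kept commented out because the obfuscated base class
# reference (`_A`) prevents this module from importing as-is.
# ---------------------------------------------------------------------------
# processor = lowerCamelCase_()
# image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
# batch = processor(images=image, return_tensors="np")
# batch["pixel_values"].shape  # -> (1, 3, 224, 224)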
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} __SCREAMING_SNAKE_CASE = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } __SCREAMING_SNAKE_CASE = { """allenai/led-base-16384""": 16384, } class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = LEDTokenizer a__ = ["input_ids", "attention_mask"] def __init__( self : int , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : str="replace" , __lowerCamelCase : Optional[int]="<s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Dict="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : Union[str, Any]="<pad>" , __lowerCamelCase : Dict="<mask>" , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : str=True , **__lowerCamelCase : Union[str, Any] , ) -> Optional[int]: super().__init__( __lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase , **__lowerCamelCase , ) A : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space: A : Any = getattr(__lowerCamelCase , pre_tok_state.pop("type" ) ) A : Any = add_prefix_space A : Tuple = pre_tok_class(**__lowerCamelCase ) A : str = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` A : List[str] = "post_processor" A : Union[str, Any] = getattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) if tokenizer_component_instance: A : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A : Union[str, Any] = tuple(state["sep"] ) if "cls" in state: A : str = tuple(state["cls"] ) A : int = False if state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space: A : List[Any] = add_prefix_space A : Dict = True if state.get("trim_offsets" , __lowerCamelCase ) != trim_offsets: A : Dict = trim_offsets A : str = True if changes_to_apply: A : int = getattr(__lowerCamelCase , state.pop("type" ) ) A : Dict = component_class(**__lowerCamelCase ) setattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) @property # Copied from 
transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Any ) -> Dict: A : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else value A : Tuple = value def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[str] ) -> BatchEncoding: A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , *__lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ) -> BatchEncoding: A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]: A : Optional[Any] = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase ) return tuple(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any]=None ) -> List[str]: A : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: A : str = [self.sep_token_id] A : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ) -> dict: A : Dict = super()._pad( encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: A : List[Any] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: A : Optional[int] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
A : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase ) if needs_to_be_padded: A : Any = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` A : Tuple = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": A : Tuple = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
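# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not from the source): how the
# `_pad` override above treats `global_attention_mask`. Padded positions get
# -1 rather than 0, because 0 already means "local attention" for LED. The
# public class name `LEDTokenizerFast` is assumed from the imports above.
# ---------------------------------------------------------------------------
# tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
# enc = tokenizer("a long document ...")
# enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
# padded = tokenizer.pad(enc, padding="max_length", max_length=16)
# padded["global_attention_mask"]  # -> [1, 0, ..., 0, -1, -1] (tail is -1)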
import math


def decimal_to_octal(num: int) -> str:
    """Convert a decimal (base 10) integer to its octal (base 8) representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
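# Quick sanity checks (added for illustration): 65 = 1*8**2 + 0*8 + 1, so
# 65 -> 0o101; the last line round-trips through Python's own base-8 parser.
assert decimal_to_octal(65) == "0o101"
assert decimal_to_octal(216) == "0o330"
assert int(decimal_to_octal(512), 8) == 512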
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
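# Illustrative check (added, not from the source): overriding the column names
# only changes the keys of `column_mapping`; the canonical values stay fixed.
_example_template = QuestionAnsweringExtractive(question_column="q", context_column="ctx")
assert _example_template.column_mapping == {"q": "question", "ctx": "context", "answers": "answers"}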
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
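# Hedged usage sketch (added, not from the source): composing a config from
# two sub-model configs via the classmethod above. Kept commented out since
# importing `BertConfig` at module import time would be circular here.
# from transformers import BertConfig
# config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
# assert config.decoder.is_decoder and config.decoder.add_cross_attention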
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class lowerCamelCase_ : '''simple docstring''' def __init__( self : int , __lowerCamelCase : Any , __lowerCamelCase : Dict=3 , __lowerCamelCase : Dict=32 , __lowerCamelCase : Any=3 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : str=[8, 16, 32, 64] , __lowerCamelCase : Dict=[1, 1, 2, 1] , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : str=3 , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase : Tuple=[2, 3, 4] , __lowerCamelCase : Any=1 , ) -> int: A : Optional[int] = parent A : List[str] = batch_size A : Tuple = image_size A : List[str] = num_channels A : List[str] = embeddings_size A : List[str] = hidden_sizes A : str = depths A : Optional[Any] = is_training A : int = use_labels A : Optional[int] = hidden_act A : List[Any] = num_labels A : List[str] = scope A : str = len(__lowerCamelCase ) A : Optional[int] = out_features A : str = out_indices A : Optional[int] = num_groups def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: A : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Optional[int] = None if self.use_labels: A : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels ) A : Tuple = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ) -> Optional[int]: A : Any = BitModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : List[Any] = model(__lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : Dict ) -> Tuple: A : Union[str, Any] = self.num_labels A : List[str] = BitForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : str = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ) -> 
List[Any]: A : Dict = BitBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Optional[Any] = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None A : Optional[Any] = None A : Optional[int] = BitBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Any = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Dict: A : List[str] = self.prepare_config_and_inputs() A , A , A : Tuple = config_and_inputs A : Tuple = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () a__ = ( {"feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: A : Any = BitModelTester(self ) A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: return @unittest.skip(reason="Bit does not output attentions" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: pass @unittest.skip(reason="Bit does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]: pass @unittest.skip(reason="Bit does not support input and output embeddings" ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]: pass def SCREAMING_SNAKE_CASE__ ( self : int ) -> str: A , A : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Dict = model_class(__lowerCamelCase ) A : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Optional[Any] = [*signature.parameters.keys()] A : List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]: A : int = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]: A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Optional[int] = model_class(config=__lowerCamelCase ) for name, module in model.named_modules(): if isinstance(__lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: def check_hidden_states_output(__lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] ): A : Dict = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): A : List[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) A : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A : List[Any] = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() A : Dict = ["preactivation", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: A : Dict = layer_type A : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) @unittest.skip(reason="Bit does not use feedforward chunking" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: pass def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]: A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]: for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Optional[Any] = BitModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]: return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: A : Union[str, Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCamelCase ) A : List[Any] = self.default_image_processor A : List[Any] = prepare_img() A : Tuple = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): A : Union[str, Any] = model(**__lowerCamelCase ) # verify the logits A : str = torch.Size((1, 
10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : Optional[Any] = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) @require_torch class lowerCamelCase_ ( _A ,unittest.TestCase ): '''simple docstring''' a__ = (BitBackbone,) if is_torch_available() else () a__ = BitConfig a__ = False def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: A : Union[str, Any] = BitModelTester(self )
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
import json import os import unittest from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCamelCase_ ( _A ,unittest.TestCase ): '''simple docstring''' a__ = XLMTokenizer a__ = False def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A : Tuple = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] A : List[str] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) A : int = ["l o 123", "lo w 1456", "e r</w> 1789", ""] A : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) A : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" ) as fp: fp.write(json.dumps(__lowerCamelCase ) ) with open(self.merges_file , "w" ) as fp: fp.write("\n".join(__lowerCamelCase ) ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Tuple ) -> Optional[Any]: A : int = "lower newer" A : Optional[Any] = "lower newer" return input_text, output_text def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]: A : int = XLMTokenizer(self.vocab_file , self.merges_file ) A : Dict = "lower" A : Dict = ["low", "er</w>"] A : Optional[Any] = tokenizer.tokenize(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) A : Tuple = tokens + ["<unk>"] A : List[str] = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[str]: A : Union[str, Any] = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" ) A : Dict = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase ) A : int = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase ) A : int = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase ) A : int = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase ) assert encoded_sentence == [0] + text + [1] assert encoded_pair == [0] + text + [1] + text_a + [1]
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase_ : '''simple docstring''' def __init__( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Optional[int]=10 , __lowerCamelCase : Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase : Tuple=[1, 1, 2, 1] , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , ) -> str: A : Optional[Any] = parent A : Optional[int] = batch_size A : List[str] = image_size A : List[str] = num_channels A : Tuple = embeddings_size A : Optional[int] = hidden_sizes A : Dict = depths A : Optional[int] = is_training A : List[str] = use_labels A : List[Any] = hidden_act A : Optional[int] = num_labels A : int = scope A : List[Any] = len(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[Any]: A : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Optional[Any] = None if self.use_labels: A : Any = ids_tensor([self.batch_size] , self.num_labels ) A : List[Any] = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Tuple: A : List[str] = TFRegNetModel(config=__lowerCamelCase ) A : str = model(__lowerCamelCase , training=__lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Tuple ) -> List[str]: A : List[Any] = self.num_labels A : int = TFRegNetForImageClassification(__lowerCamelCase ) A : str = model(__lowerCamelCase , labels=__lowerCamelCase , training=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: A : Any = self.prepare_config_and_inputs() A , A , A : str = config_and_inputs A : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () a__ = ( {"feature-extraction": TFRegNetModel, 
"image-classification": TFRegNetForImageClassification} if is_tf_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: A : Optional[Any] = TFRegNetModelTester(self ) A : int = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Tuple: return @unittest.skip(reason="RegNet does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) @slow def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: super().test_keras_fit() @unittest.skip(reason="RegNet does not support input and output embeddings" ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Union[str, Any]: pass def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple: A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Union[str, Any] = model_class(__lowerCamelCase ) A : int = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Union[str, Any] = [*signature.parameters.keys()] A : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple: A : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: def check_hidden_states_output(__lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ): A : int = model_class(__lowerCamelCase ) A : int = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) , training=__lowerCamelCase ) A : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A : Dict = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) A , A : int = self.model_tester.prepare_config_and_inputs_for_common() A : str = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: A : List[str] = layer_type A : List[Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: A , A : Tuple = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(__lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]={} ): A : Optional[int] = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ) A : int = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ).to_tuple() def recursive_check(__lowerCamelCase : List[str] , __lowerCamelCase : Any ): if isinstance(__lowerCamelCase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in 
zip(__lowerCamelCase , __lowerCamelCase ): recursive_check(__lowerCamelCase , __lowerCamelCase ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(__lowerCamelCase , __lowerCamelCase ) ) , msg=( "Tuple and dict output are not equal. Difference:" F""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ) , ) recursive_check(__lowerCamelCase , __lowerCamelCase ) for model_class in self.all_model_classes: A : Tuple = model_class(__lowerCamelCase ) A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} ) A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: A : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Union[str, Any] = TFRegNetModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]: A : List[Any] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) A : Optional[int] = self.default_image_processor A : List[Any] = prepare_img() A : str = image_processor(images=__lowerCamelCase , return_tensors="tf" ) # forward pass A : List[Any] = model(**__lowerCamelCase , training=__lowerCamelCase ) # verify the logits A : Dict = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : Optional[Any] = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 )
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = { """microsoft/unispeech-large-1500h-cv""": ( """https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json""" ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = "unispeech" def __init__( self : Optional[int] , __lowerCamelCase : Optional[int]=32 , __lowerCamelCase : Dict=7_68 , __lowerCamelCase : List[str]=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : str=30_72 , __lowerCamelCase : int="gelu" , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : List[str]=0.0 , __lowerCamelCase : Optional[Any]=0.0 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : List[str]=0.02 , __lowerCamelCase : List[Any]=1e-5 , __lowerCamelCase : Dict="group" , __lowerCamelCase : Union[str, Any]="gelu" , __lowerCamelCase : str=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __lowerCamelCase : int=(5, 2, 2, 2, 2, 2, 2) , __lowerCamelCase : List[Any]=(10, 3, 3, 3, 3, 2, 2) , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : Dict=1_28 , __lowerCamelCase : Union[str, Any]=16 , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Dict=0.05 , __lowerCamelCase : Tuple=10 , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : str=0.0 , __lowerCamelCase : str=10 , __lowerCamelCase : Tuple=0 , __lowerCamelCase : Dict=3_20 , __lowerCamelCase : Union[str, Any]=2 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : Union[str, Any]=1_00 , __lowerCamelCase : Optional[Any]=2_56 , __lowerCamelCase : Any=2_56 , __lowerCamelCase : str=0.1 , __lowerCamelCase : Optional[Any]="mean" , __lowerCamelCase : Dict=False , __lowerCamelCase : List[Any]=False , __lowerCamelCase : int=2_56 , __lowerCamelCase : Union[str, Any]=80 , __lowerCamelCase : int=0 , __lowerCamelCase : List[str]=1 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : int=0.5 , **__lowerCamelCase : Any , ) -> List[Any]: super().__init__(**__lowerCamelCase , pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase ) A : Tuple = hidden_size A : Union[str, Any] = feat_extract_norm A : int = feat_extract_activation A : List[str] = list(__lowerCamelCase ) A : str = list(__lowerCamelCase ) A : Optional[Any] = list(__lowerCamelCase ) A : Tuple = conv_bias A : Tuple = num_conv_pos_embeddings A : int = num_conv_pos_embedding_groups A : Dict = len(self.conv_dim ) A : List[str] = num_hidden_layers A : Any = intermediate_size A : Optional[Any] = hidden_act A : List[str] = num_attention_heads A : Optional[Any] = hidden_dropout A : Dict = attention_dropout A : str = activation_dropout A : Union[str, Any] = feat_proj_dropout A : Union[str, Any] = final_dropout A : List[Any] = layerdrop A : Optional[Any] = layer_norm_eps A : List[Any] = initializer_range A : List[Any] = num_ctc_classes A : Tuple = vocab_size A : int = do_stable_layer_norm A : str = use_weighted_layer_sum A : Any = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A : Dict = apply_spec_augment A : int = mask_time_prob A : List[str] = mask_time_length A : List[str] = mask_time_min_masks A : List[str] = mask_feature_prob A : int = mask_feature_length A : int = mask_feature_min_masks # parameters for pretraining with codevector quantized representations A : Optional[Any] = num_codevectors_per_group A : Optional[int] = num_codevector_groups A : List[Any] = contrastive_logits_temperature A : Optional[Any] = feat_quantizer_dropout A : Dict = num_negatives A : Dict = codevector_dim A : Any = proj_codevector_dim A : List[str] = diversity_loss_weight # ctc loss A : Any = ctc_loss_reduction A : Union[str, Any] = ctc_zero_infinity # pretraining loss A : str = replace_prob @property def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int: return functools.reduce(operator.mul , self.conv_stride , 1 )
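# Worked example (added for illustration) for the stride-product property at
# the end of the config above: with the default conv_stride
# (5, 2, 2, 2, 2, 2, 2), the feature extractor downsamples raw audio by
# 5 * 2**6 = 320 samples per frame, i.e. one frame per 20 ms at 16 kHz.
import functools
import operator

assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320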
import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = (PNDMScheduler,) a__ = (("num_inference_steps", 50),) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , **__lowerCamelCase : str ) -> Optional[Any]: A : Union[str, Any] = { "num_train_timesteps": 10_00, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**__lowerCamelCase ) return config def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : List[str]=0 , **__lowerCamelCase : Any ) -> Tuple: A : Dict = dict(self.forward_default_kwargs ) A : Dict = kwargs.pop("num_inference_steps" , __lowerCamelCase ) A : Union[str, Any] = self.dummy_sample A : List[Any] = 0.1 * sample A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A : Any = self.get_scheduler_config(**__lowerCamelCase ) A : int = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals A : Tuple = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCamelCase ) A : Dict = scheduler_class.from_pretrained(__lowerCamelCase ) new_scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals A : Tuple = dummy_past_residuals[:] A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : str = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A : int = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[str] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]: pass def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Optional[Any]=0 , **__lowerCamelCase : Tuple ) -> str: A : List[str] = dict(self.forward_default_kwargs ) A : Optional[int] = kwargs.pop("num_inference_steps" , __lowerCamelCase ) A : List[str] = self.dummy_sample A : Any = 0.1 * sample A : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A : Tuple = self.get_scheduler_config() A : Optional[int] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals (must be after setting timesteps) A : Optional[int] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCamelCase ) A : str = scheduler_class.from_pretrained(__lowerCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residual (must be after setting timesteps) A : Optional[Any] = dummy_past_residuals[:] A : Union[str, Any] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : Dict = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A : 
Union[str, Any] = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[Any] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : Tuple , **__lowerCamelCase : Any ) -> Union[str, Any]: A : Optional[Any] = self.scheduler_classes[0] A : List[Any] = self.get_scheduler_config(**__lowerCamelCase ) A : str = scheduler_class(**__lowerCamelCase ) A : List[str] = 10 A : Union[str, Any] = self.dummy_model() A : int = self.dummy_sample_deter scheduler.set_timesteps(__lowerCamelCase ) for i, t in enumerate(scheduler.prk_timesteps ): A : Optional[int] = model(__lowerCamelCase , __lowerCamelCase ) A : Optional[int] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): A : Tuple = model(__lowerCamelCase , __lowerCamelCase ) A : Tuple = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample return sample def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any: A : Union[str, Any] = dict(self.forward_default_kwargs ) A : Union[str, Any] = kwargs.pop("num_inference_steps" , __lowerCamelCase ) for scheduler_class in self.scheduler_classes: A : Dict = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) A : List[Any] = self.dummy_sample A : List[Any] = 0.1 * sample if num_inference_steps is not None and hasattr(__lowerCamelCase , "set_timesteps" ): scheduler.set_timesteps(__lowerCamelCase ) elif num_inference_steps is not None and not hasattr(__lowerCamelCase , "set_timesteps" ): A : List[str] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] A : Tuple = dummy_past_residuals[:] A : Dict = scheduler.step_prk(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[Any] = scheduler.step_prk(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) A : Any = scheduler.step_plms(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : str = scheduler.step_plms(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple: for timesteps in [1_00, 10_00]: self.check_over_configs(num_train_timesteps=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]: for steps_offset in [0, 1]: self.check_over_configs(steps_offset=__lowerCamelCase ) A : Dict = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config(steps_offset=1 ) A : Optional[int] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=__lowerCamelCase , beta_end=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : 
Optional[int] ) -> Union[str, Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]: for t in [1, 5, 10]: self.check_over_forward(time_step=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int: for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ): self.check_over_forward(num_inference_steps=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any: # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 A : str = 27 for scheduler_class in self.scheduler_classes: A : Tuple = self.dummy_sample A : List[Any] = 0.1 * sample A : List[Any] = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : str ) -> int: with self.assertRaises(__lowerCamelCase ): A : Union[str, Any] = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict: A : Optional[Any] = self.full_loop() A : Tuple = torch.sum(torch.abs(__lowerCamelCase ) ) A : Optional[int] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 198.1318 ) < 1e-2 assert abs(result_mean.item() - 0.2580 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any: A : Any = self.full_loop(prediction_type="v_prediction" ) A : Union[str, Any] = torch.sum(torch.abs(__lowerCamelCase ) ) A : List[str] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 67.3986 ) < 1e-2 assert abs(result_mean.item() - 0.0878 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]: # We specify different beta, so that the first alpha is 0.99 A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 ) A : Dict = torch.sum(torch.abs(__lowerCamelCase ) ) A : Any = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 230.0399 ) < 1e-2 assert abs(result_mean.item() - 0.2995 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any: # We specify different beta, so that the first alpha is 0.99 A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 ) A : List[Any] = torch.sum(torch.abs(__lowerCamelCase ) ) A : Optional[Any] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 186.9482 ) < 1e-2 assert abs(result_mean.item() - 0.2434 ) < 1e-3
import argparse import torch from transformers import ( UniSpeechSatConfig, UniSpeechSatForAudioFrameClassification, UniSpeechSatForSequenceClassification, UniSpeechSatForXVector, WavaVecaFeatureExtractor, logging, ) logging.set_verbosity_info() __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): A : Tuple = UniSpeechSatForSequenceClassification.from_pretrained(_lowerCamelCase , config=_lowerCamelCase ) A : Optional[int] = downstream_dict["projector.weight"] A : List[str] = downstream_dict["projector.bias"] A : List[str] = downstream_dict["model.post_net.linear.weight"] A : List[str] = downstream_dict["model.post_net.linear.bias"] return model def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): A : str = UniSpeechSatForAudioFrameClassification.from_pretrained(_lowerCamelCase , config=_lowerCamelCase ) A : Tuple = downstream_dict["model.linear.weight"] A : str = downstream_dict["model.linear.bias"] return model def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): A : int = UniSpeechSatForXVector.from_pretrained(_lowerCamelCase , config=_lowerCamelCase ) A : Optional[int] = downstream_dict["connector.weight"] A : str = downstream_dict["connector.bias"] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): A : Dict = downstream_dict[ f"""model.framelevel_feature_extractor.module.{i}.kernel.weight""" ] A : Union[str, Any] = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""] A : List[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"] A : str = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"] A : Optional[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"] A : Optional[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"] A : Optional[Any] = downstream_dict["objective.W"] return model @torch.no_grad() def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): A : Optional[Any] = torch.load(_lowerCamelCase , map_location="cpu" ) A : Dict = checkpoint["Downstream"] A : Dict = UniSpeechSatConfig.from_pretrained(_lowerCamelCase ) A : Any = WavaVecaFeatureExtractor.from_pretrained( _lowerCamelCase , return_attention_mask=_lowerCamelCase , do_normalize=_lowerCamelCase ) A : Dict = hf_config.architectures[0] if arch.endswith("ForSequenceClassification" ): A : str = convert_classification(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) elif arch.endswith("ForAudioFrameClassification" ): A : Optional[int] = convert_diarization(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) elif arch.endswith("ForXVector" ): A : Optional[Any] = convert_xvector(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) else: raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" ) if hf_config.use_weighted_layer_sum: A : Union[str, Any] = checkpoint["Featurizer"]["weights"] hf_feature_extractor.save_pretrained(_lowerCamelCase ) hf_model.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument( """--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model.""" ) parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl 
checkpoint.""") parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""") __SCREAMING_SNAKE_CASE = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
17
from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function __SCREAMING_SNAKE_CASE = 1.0_5_4_5_7_1_8_1_7e-3_4 # unit of ℏ : J * s __SCREAMING_SNAKE_CASE = 3e8 # unit of c : m * s^-1 def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if (force, area, distance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if force < 0: raise ValueError("Magnitude of force can not be negative" ) if distance < 0: raise ValueError("Distance can not be negative" ) if area < 0: raise ValueError("Area can not be negative" ) if force == 0: A : int = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 240 * (distance) ** 4 ) return {"force": force} elif area == 0: A : Tuple = (240 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: A : Dict = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError("One and only one argument must be 0" ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
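All three branches above rearrange one relation, F = (ħ c π² A) / (240 d⁴). A self-contained sketch of the force branch in SI units; the function and variable names here are illustrative, not from the file:

from math import pi

H_BAR = 1.054571817e-34  # J * s, as defined above
SPEED_OF_LIGHT = 3e8     # m / s

def casimir_force(area_m2: float, distance_m: float) -> float:
    # F = (hbar * c * pi^2 * A) / (240 * d^4)
    return (H_BAR * SPEED_OF_LIGHT * pi**2 * area_m2) / (240 * distance_m**4)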
17
1
from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = { """s-JoL/Open-Llama-V1""": """https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json""", } class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = "open-llama" def __init__( self : Optional[int] , __lowerCamelCase : Optional[Any]=10_00_00 , __lowerCamelCase : List[str]=40_96 , __lowerCamelCase : Any=1_10_08 , __lowerCamelCase : str=32 , __lowerCamelCase : List[str]=32 , __lowerCamelCase : Dict="silu" , __lowerCamelCase : List[Any]=20_48 , __lowerCamelCase : str=0.02 , __lowerCamelCase : List[str]=1e-6 , __lowerCamelCase : Dict=True , __lowerCamelCase : str=0 , __lowerCamelCase : Optional[Any]=1 , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : Any=False , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : str=0.1 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Tuple=True , __lowerCamelCase : str=True , __lowerCamelCase : Optional[int]=None , **__lowerCamelCase : Optional[Any] , ) -> Optional[Any]: A : Optional[int] = vocab_size A : Optional[Any] = max_position_embeddings A : Tuple = hidden_size A : List[Any] = intermediate_size A : Tuple = num_hidden_layers A : Any = num_attention_heads A : Optional[int] = hidden_act A : List[str] = initializer_range A : int = rms_norm_eps A : List[str] = use_cache A : Any = kwargs.pop( "use_memorry_efficient_attention" , __lowerCamelCase ) A : str = hidden_dropout_prob A : Any = attention_dropout_prob A : Tuple = use_stable_embedding A : Optional[int] = shared_input_output_embedding A : Any = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , tie_word_embeddings=__lowerCamelCase , **__lowerCamelCase , ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __lowerCamelCase ) or len(self.rope_scaling ) != 2: raise ValueError( "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " F"""got {self.rope_scaling}""" ) A : Any = self.rope_scaling.get("type" , __lowerCamelCase ) A : Union[str, Any] = self.rope_scaling.get("factor" , __lowerCamelCase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" ) if rope_scaling_factor is None or not isinstance(__lowerCamelCase , __lowerCamelCase ) or rope_scaling_factor <= 1.0: raise ValueError(F"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
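A minimal sketch of the `rope_scaling` contract validated above, assuming the class is published as OpenLlamaConfig in transformers; the factor value is arbitrary:

from transformers import OpenLlamaConfig

# Passes validation: `type` in {"linear", "dynamic"} and `factor` a float > 1.
config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
# rope_scaling={"type": "linear", "factor": 0.5} would raise ValueError instead.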
17
import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : Optional[Any] = RobertaPreLayerNormConfig.from_pretrained( _lowerCamelCase , architectures=["RobertaPreLayerNormForMaskedLM"] ) # convert state_dict A : List[Any] = torch.load(hf_hub_download(repo_id=_lowerCamelCase , filename="pytorch_model.bin" ) ) A : Union[str, Any] = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwriting 'roberta' if tensor_key.startswith("roberta." ): A : int = "roberta_prelayernorm." + tensor_key[len("roberta." ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ): continue A : Any = tensor_value A : Optional[int] = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=_lowerCamelCase , config=_lowerCamelCase , state_dict=_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) # convert tokenizer A : Optional[Any] = AutoTokenizer.from_pretrained(_lowerCamelCase ) tokenizer.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint-repo""", default=None, type=str, required=True, help="""Path to the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) __SCREAMING_SNAKE_CASE = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
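The renaming loop above boils down to a single prefix rewrite; a sketch on a hypothetical key:

key = "roberta.encoder.layer.0.attention.self.query.weight"
if key.startswith("roberta."):
    key = "roberta_prelayernorm." + key[len("roberta."):]
# -> "roberta_prelayernorm.encoder.layer.0.attention.self.query.weight"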
17
1
from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = { """funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/config.json""", """funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json""", """funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/config.json""", """funnel-transformer/medium-base""": """https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json""", """funnel-transformer/intermediate""": ( """https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json""" ), """funnel-transformer/intermediate-base""": ( """https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json""" ), """funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/config.json""", """funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json""", """funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json""", """funnel-transformer/xlarge-base""": """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json""", } class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = "funnel" a__ = { "hidden_size": "d_model", "num_attention_heads": "n_head", } def __init__( self : int , __lowerCamelCase : Any=3_05_22 , __lowerCamelCase : int=[4, 4, 4] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Any=2 , __lowerCamelCase : int=7_68 , __lowerCamelCase : Optional[Any]=12 , __lowerCamelCase : Optional[int]=64 , __lowerCamelCase : Any=30_72 , __lowerCamelCase : Tuple="gelu_new" , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : str=0.0 , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[Any]=1e-9 , __lowerCamelCase : Any="mean" , __lowerCamelCase : Optional[Any]="relative_shift" , __lowerCamelCase : Any=True , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[int]=True , **__lowerCamelCase : Optional[int] , ) -> Any: A : Dict = vocab_size A : Tuple = block_sizes A : Union[str, Any] = [1] * len(__lowerCamelCase ) if block_repeats is None else block_repeats assert len(__lowerCamelCase ) == len( self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length." 
A : int = num_decoder_layers A : Tuple = d_model A : int = n_head A : Any = d_head A : List[Any] = d_inner A : str = hidden_act A : Union[str, Any] = hidden_dropout A : int = attention_dropout A : Optional[int] = activation_dropout A : Tuple = initializer_range A : Optional[Any] = initializer_std A : Union[str, Any] = layer_norm_eps assert pooling_type in [ "mean", "max", ], F"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported.""" A : Tuple = pooling_type assert attention_type in [ "relative_shift", "factorized", ], F"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported.""" A : Union[str, Any] = attention_type A : Tuple = separate_cls A : List[str] = truncate_seq A : Dict = pool_q_only super().__init__(**__lowerCamelCase ) @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> str: return sum(self.block_sizes ) @num_hidden_layers.setter def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : int ) -> Dict: raise NotImplementedError( "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`." ) @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int: return len(self.block_sizes ) @num_blocks.setter def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : Tuple ) -> str: raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`." )
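A small sketch of the derived properties above, assuming the class is published as FunnelConfig in transformers:

from transformers import FunnelConfig

config = FunnelConfig(block_sizes=[4, 4, 4])
assert config.num_hidden_layers == 12  # sum(block_sizes), per the property above
assert config.num_blocks == 3          # len(block_sizes)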
17
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __SCREAMING_SNAKE_CASE = { """configuration_instructblip""": [ """INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """InstructBlipConfig""", """InstructBlipQFormerConfig""", """InstructBlipVisionConfig""", ], """processing_instructblip""": ["""InstructBlipProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """InstructBlipQFormerModel""", """InstructBlipPreTrainedModel""", """InstructBlipForConditionalGeneration""", """InstructBlipVisionModel""", ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys __SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
17
1
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: A : Union[str, Any] = XLMRobertaModel.from_pretrained("xlm-roberta-base" ) A : Tuple = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] ) # The dog is cute and lives in the garden house A : Tuple = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim A : List[str] = torch.tensor( [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): A : Tuple = model(__lowerCamelCase )["last_hidden_state"].detach() self.assertEqual(output.shape , __lowerCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCamelCase , atol=1e-3 ) ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int: A : str = XLMRobertaModel.from_pretrained("xlm-roberta-large" ) A : List[Any] = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] ) # The dog is cute and lives in the garden house A : Optional[Any] = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim A : List[Any] = torch.tensor( [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): A : Optional[int] = model(__lowerCamelCase )["last_hidden_state"].detach() self.assertEqual(output.shape , __lowerCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCamelCase , atol=1e-3 ) )
17
import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Optional[int]: self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) ) for a, b in zip(__lowerCamelCase , __lowerCamelCase ): self.assertAlmostEqual(__lowerCamelCase , __lowerCamelCase , delta=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]: A : List[Any] = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(__lowerCamelCase ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: A : Union[str, Any] = None ops.enable_eager_execution_internal() A : Tuple = tf.config.list_physical_devices("CPU" ) if len(__lowerCamelCase ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) A : Dict = tf.config.list_logical_devices(device_type="CPU" ) A : List[str] = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): A : Optional[int] = GradientAccumulator() A : Tuple = tf.Variable([4.0, 3.0] ) A , A : List[Any] = create_optimizer(5e-5 , 10 , 5 ) A : List[str] = tf.Variable([0.0, 0.0] , trainable=__lowerCamelCase ) def accumulate_on_replica(__lowerCamelCase : Tuple ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(__lowerCamelCase : Any , __lowerCamelCase : Optional[int] ): with strategy.scope(): A : int = strategy.experimental_local_results(__lowerCamelCase ) local_variables[0].assign(__lowerCamelCase ) local_variables[1].assign(__lowerCamelCase ) strategy.run(__lowerCamelCase , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(__lowerCamelCase ) def _check_local_values(__lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ): A : Optional[int] = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , __lowerCamelCase , tol=1e-2 ) self.assertListAlmostEqual(values[1].value() , __lowerCamelCase , tol=1e-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
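The contract the first test checks, in isolation; a sketch assuming transformers' TF GradientAccumulator:

import tensorflow as tf
from transformers import GradientAccumulator

accumulator = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0])])
accumulator([tf.constant([-2.0, 1.0])])
# accumulator.step == 2; accumulator.gradients[0] == [-1.0, 3.0] (element-wise sum)
accumulator.reset()  # step back to 0, gradients zeroed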
17
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available __SCREAMING_SNAKE_CASE = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = ["""BartphoTokenizer"""] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys __SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
17
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __SCREAMING_SNAKE_CASE = { """configuration_altclip""": [ """ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AltCLIPConfig""", """AltCLIPTextConfig""", """AltCLIPVisionConfig""", ], """processing_altclip""": ["""AltCLIPProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """AltCLIPPreTrainedModel""", """AltCLIPModel""", """AltCLIPTextModel""", """AltCLIPVisionModel""", ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys __SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
17
1
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) __SCREAMING_SNAKE_CASE = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append( (F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""")) rename_keys.append( (F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""")) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append( ( F"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""", F"""decoder.layers.{i}.encoder_attn.out_proj.weight""", ) ) rename_keys.append( ( F"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""", F"""decoder.layers.{i}.encoder_attn.out_proj.bias""", ) ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""")) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) 
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""")) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (F"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", F"""decoder.layers.{i}.sa_qcontent_proj.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", F"""decoder.layers.{i}.sa_kcontent_proj.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", F"""decoder.layers.{i}.sa_qpos_proj.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", F"""decoder.layers.{i}.sa_kpos_proj.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.weight""", F"""decoder.layers.{i}.sa_v_proj.weight""")) rename_keys.append( (F"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", F"""decoder.layers.{i}.ca_qcontent_proj.weight""") ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (F"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", F"""decoder.layers.{i}.ca_kcontent_proj.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", F"""decoder.layers.{i}.ca_kpos_proj.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.weight""", F"""decoder.layers.{i}.ca_v_proj.weight""")) rename_keys.append( (F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", F"""decoder.layers.{i}.ca_qpos_sine_proj.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", F"""decoder.layers.{i}.sa_qcontent_proj.bias""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", F"""decoder.layers.{i}.sa_kcontent_proj.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", F"""decoder.layers.{i}.sa_qpos_proj.bias""")) rename_keys.append((F"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", F"""decoder.layers.{i}.sa_kpos_proj.bias""")) rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.bias""", F"""decoder.layers.{i}.sa_v_proj.bias""")) rename_keys.append( (F"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", F"""decoder.layers.{i}.ca_qcontent_proj.bias""") ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (F"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", F"""decoder.layers.{i}.ca_kcontent_proj.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", F"""decoder.layers.{i}.ca_kpos_proj.bias""")) rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.bias""", F"""decoder.layers.{i}.ca_v_proj.bias""")) rename_keys.append( (F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", F"""decoder.layers.{i}.ca_qpos_sine_proj.bias""") ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), 
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""), ("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""), ("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""), ("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""), ("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""), ("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""), ("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""), ("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""), ("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""), ("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""), ] ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): A : Optional[Any] = state_dict.pop(_lowerCamelCase ) A : Tuple = val def UpperCAmelCase ( _lowerCamelCase ): A : Optional[int] = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: A : Optional[Any] = key.replace("backbone.0.body" , "backbone.conv_encoder.model" ) A : Dict = value else: A : Dict = value return new_state_dict def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase=False ): A : int = "" if is_panoptic: A : List[str] = "conditional_detr." 
# first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) A : List[str] = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) A : Optional[Any] = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict A : Any = in_proj_weight[:256, :] A : Optional[Any] = in_proj_bias[:256] A : List[str] = in_proj_weight[256:512, :] A : List[str] = in_proj_bias[256:512] A : List[str] = in_proj_weight[-256:, :] A : List[str] = in_proj_bias[-256:] def UpperCAmelCase ( ): A : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg" A : List[str] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) return im @torch.no_grad() def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : Optional[int] = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: A : List[str] = "resnet101" if "dc5" in model_name: A : List[Any] = True A : List[str] = "panoptic" in model_name if is_panoptic: A : Any = 250 else: A : List[Any] = 91 A : Optional[Any] = "huggingface/label-files" A : Optional[Any] = "coco-detection-id2label.json" A : List[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) ) A : str = {int(_lowerCamelCase ): v for k, v in idalabel.items()} A : int = idalabel A : Dict = {v: k for k, v in idalabel.items()} # load image processor A : Optional[Any] = "coco_panoptic" if is_panoptic else "coco_detection" A : int = ConditionalDetrImageProcessor(format=_lowerCamelCase ) # prepare image A : Any = prepare_img() A : Optional[Any] = image_processor(images=_lowerCamelCase , return_tensors="pt" ) A : Optional[Any] = encoding["pixel_values"] logger.info(f"""Converting model {model_name}...""" ) # load original model from torch hub A : Optional[Any] = torch.hub.load("DeppMeng/ConditionalDETR" , _lowerCamelCase , pretrained=_lowerCamelCase ).eval() A : List[str] = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: A : Dict = "conditional_detr." + src rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) A : List[Any] = rename_backbone_keys(_lowerCamelCase ) # query, key and value matrices need special treatment read_in_q_k_v(_lowerCamelCase , is_panoptic=_lowerCamelCase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them A : Optional[Any] = "conditional_detr.model." if is_panoptic else "model." 
for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("conditional_detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): A : List[str] = state_dict.pop(_lowerCamelCase ) A : int = val elif "class_labels_classifier" in key or "bbox_predictor" in key: A : Dict = state_dict.pop(_lowerCamelCase ) A : Any = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: A : Tuple = state_dict.pop(_lowerCamelCase ) A : Dict = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): A : Optional[int] = state_dict.pop(_lowerCamelCase ) A : Dict = val # finally, create HuggingFace model and load state dict A : str = ConditionalDetrForSegmentation(_lowerCamelCase ) if is_panoptic else ConditionalDetrForObjectDetection(_lowerCamelCase ) model.load_state_dict(_lowerCamelCase ) model.eval() model.push_to_hub(repo_id=_lowerCamelCase , organization="DepuMeng" , commit_message="Add model" ) # verify our conversion A : Dict = conditional_detr(_lowerCamelCase ) A : Union[str, Any] = model(_lowerCamelCase ) assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1e-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1e-4 ) # Save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) image_processor.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument( """--model_name""", default="""conditional_detr_resnet50""", type=str, help="""Name of the CONDITIONAL_DETR model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) __SCREAMING_SNAKE_CASE = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
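The read_in_q_k_v helper above slices one fused in-projection matrix into query/key/value blocks; a standalone sketch with random weights:

import torch

in_proj_weight = torch.randn(3 * 256, 256)  # fused q/k/v, as in the checkpoint
q_w = in_proj_weight[:256, :]
k_w = in_proj_weight[256:512, :]
v_w = in_proj_weight[-256:, :]
assert q_w.shape == k_w.shape == v_w.shape == (256, 256)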
17
import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging __SCREAMING_SNAKE_CASE = ( """https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py""" ) __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name def UpperCAmelCase ( ): A : Union[str, Any] = "https://pypi.org/pypi/diffusers/json" A : List[Any] = json.loads(request.urlopen(_lowerCamelCase ).read() )["releases"].keys() return sorted(_lowerCamelCase , key=lambda _lowerCamelCase : version.Version(_lowerCamelCase ) ) def UpperCAmelCase ( ): # This function has already been executed if HF_MODULES_CACHE already is in the Python path. if HF_MODULES_CACHE in sys.path: return sys.path.append(_lowerCamelCase ) os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) A : List[Any] = Path(_lowerCamelCase ) / "__init__.py" if not init_path.exists(): init_path.touch() def UpperCAmelCase ( _lowerCamelCase ): init_hf_modules() A : Tuple = Path(_lowerCamelCase ) / name # If the parent module does not exist yet, recursively create it. if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) A : Optional[int] = dynamic_module_path / "__init__.py" if not init_path.exists(): init_path.touch() def UpperCAmelCase ( _lowerCamelCase ): with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f: A : Union[str, Any] = f.read() # Imports of the form `import .xxx` A : Union[str, Any] = re.findall("^\s*import\s+\.(\S+)\s*$" , _lowerCamelCase , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import" , _lowerCamelCase , flags=re.MULTILINE ) # Unique-ify return list(set(_lowerCamelCase ) ) def UpperCAmelCase ( _lowerCamelCase ): A : Optional[int] = False A : Tuple = [module_file] A : Optional[int] = [] # Let's recurse through all relative imports while not no_change: A : Optional[Any] = [] for f in files_to_check: new_imports.extend(get_relative_imports(_lowerCamelCase ) ) A : Optional[Any] = Path(_lowerCamelCase ).parent A : List[str] = [str(module_path / m ) for m in new_imports] A : Optional[Any] = [f for f in new_import_files if f not in all_relative_imports] A : Union[str, Any] = [f"""{f}.py""" for f in new_import_files] A : Tuple = len(_lowerCamelCase ) == 0 all_relative_imports.extend(_lowerCamelCase ) return all_relative_imports def UpperCAmelCase ( _lowerCamelCase ): with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f: A : Dict = f.read() # Imports of the form `import xxx` A : List[str] = re.findall("^\s*import\s+(\S+)\s*$" , _lowerCamelCase , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall("^\s*from\s+(\S+)\s+import" , _lowerCamelCase , flags=re.MULTILINE ) # Only keep the top-level module A : Optional[int] = [imp.split("." )[0] for imp in imports if not imp.startswith("." 
)] # Unique-ify and test we got them all A : Any = list(set(_lowerCamelCase ) ) A : Tuple = [] for imp in imports: try: importlib.import_module(_lowerCamelCase ) except ImportError: missing_packages.append(_lowerCamelCase ) if len(_lowerCamelCase ) > 0: raise ImportError( "This modeling file requires the following packages that were not found in your environment: " f"""{", ".join(_lowerCamelCase )}. Run `pip install {" ".join(_lowerCamelCase )}`""" ) return get_relative_imports(_lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : int = module_path.replace(os.path.sep , "." ) A : Optional[Any] = importlib.import_module(_lowerCamelCase ) if class_name is None: return find_pipeline_class(_lowerCamelCase ) return getattr(_lowerCamelCase , _lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase ): from ..pipelines import DiffusionPipeline A : int = dict(inspect.getmembers(_lowerCamelCase , inspect.isclass ) ) A : Union[str, Any] = None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , _lowerCamelCase ) and cls.__module__.split("." )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:""" f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in""" f""" {loaded_module}.""" ) A : Any = cls return pipeline_class def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , ): A : List[Any] = str(_lowerCamelCase ) A : Any = os.path.join(_lowerCamelCase , _lowerCamelCase ) if os.path.isfile(_lowerCamelCase ): A : Union[str, Any] = module_file_or_url A : Any = "local" elif pretrained_model_name_or_path.count("/" ) == 0: A : Optional[Any] = get_diffusers_versions() # cut ".dev0" A : Union[str, Any] = "v" + ".".join(__version__.split("." )[:3] ) # retrieve github version that matches if revision is None: A : List[Any] = latest_version if latest_version[1:] in available_versions else "main" logger.info(f"""Defaulting to latest_version: {revision}.""" ) elif revision in available_versions: A : Optional[Any] = f"""v{revision}""" elif revision == "main": A : Dict = revision else: raise ValueError( f"""`custom_revision`: {revision} does not exist. 
Please make sure to choose one of""" f""" {", ".join(available_versions + ["main"] )}.""" ) # community pipeline on GitHub A : Dict = COMMUNITY_PIPELINES_URL.format(revision=_lowerCamelCase , pipeline=_lowerCamelCase ) try: A : Optional[int] = cached_download( _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , ) A : Optional[Any] = "git" A : Any = pretrained_model_name_or_path + ".py" except EnvironmentError: logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" ) raise else: try: # Load from URL or cache if already cached A : Any = hf_hub_download( _lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , ) A : Optional[Any] = os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) ) except EnvironmentError: logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" ) raise # Check we have all the requirements in our environment A : List[str] = check_imports(_lowerCamelCase ) # Now we move the module inside our cached dynamic modules. A : List[str] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(_lowerCamelCase ) A : Optional[int] = Path(_lowerCamelCase ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(_lowerCamelCase , submodule_path / module_file ) for module_needed in modules_needed: A : int = f"""{module_needed}.py""" shutil.copy(os.path.join(_lowerCamelCase , _lowerCamelCase ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(_lowerCamelCase , _lowerCamelCase ): A : Optional[Any] = use_auth_token elif use_auth_token is True: A : Dict = HfFolder.get_token() else: A : Tuple = None A : List[str] = model_info(_lowerCamelCase , revision=_lowerCamelCase , token=_lowerCamelCase ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. 
A : str = submodule_path / commit_hash A : List[str] = full_submodule + os.path.sep + commit_hash create_dynamic_module(_lowerCamelCase ) if not (submodule_path / module_file).exists(): shutil.copy(_lowerCamelCase , submodule_path / module_file ) # Make sure we also have every file with relative imports for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( _lowerCamelCase , f"""{module_needed}.py""" , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , ) return os.path.join(_lowerCamelCase , _lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , **_lowerCamelCase , ): A : int = get_cached_module_file( _lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , ) return get_class_in_module(_lowerCamelCase , final_module.replace(".py" , "" ) )
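A quick sketch of the relative-import scan implemented above, run on a synthetic module body:

import re

source = "import .utils\nfrom .pipeline import Foo\nimport torch\n"
relative = re.findall(r"^\s*import\s+\.(\S+)\s*$", source, flags=re.MULTILINE)
relative += re.findall(r"^\s*from\s+\.(\S+)\s+import", source, flags=re.MULTILINE)
# relative == ['utils', 'pipeline']; 'torch' is not a relative import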
17
1
def UpperCAmelCase ( _lowerCamelCase ): if len(_lowerCamelCase ) <= 1: return [tuple(_lowerCamelCase )] A : str = [] def generate(_lowerCamelCase , _lowerCamelCase ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1 , _lowerCamelCase ) for i in range(k - 1 ): if k % 2 == 0: # k is even A , A : Optional[int] = arr[k - 1], arr[i] else: # k is odd A , A : Dict = arr[k - 1], arr[0] generate(k - 1 , _lowerCamelCase ) generate(len(_lowerCamelCase ) , _lowerCamelCase ) return res if __name__ == "__main__": __SCREAMING_SNAKE_CASE = input("""Enter numbers separated by a comma:\n""").strip() __SCREAMING_SNAKE_CASE = [int(item) for item in user_input.split(""",""")] print(heaps(arr))
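For reference, the ordering Heap's algorithm above produces for a three-element input (the function is published as heaps in the __main__ guard):

# heaps([1, 2, 3])
# -> [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]
# Each tuple differs from its predecessor by a single swap.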
17
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed __SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""): from run_translation import main # noqa set_seed(42) __SCREAMING_SNAKE_CASE = """sshleifer/student_marian_en_ro_6_1""" __SCREAMING_SNAKE_CASE = """sshleifer/tiny-mbart""" @require_torch class lowerCamelCase_ ( _A ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : str=None , __lowerCamelCase : List[str]=True , __lowerCamelCase : str=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Optional[int]=True , ) -> Dict: A : str = self.run_trainer( eval_steps=1 , max_len=12 , model_name=__lowerCamelCase , num_train_epochs=1 , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , predict_with_generate=__lowerCamelCase , do_train=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , ) A : Dict = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history if not do_eval: return A : List[Any] = [log for log in logs if "eval_loss" in log.keys()] A : Any = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats A : List[str] = eval_metrics[-1] assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase ) assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: self.run_seqaseq_quick() @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: self.run_seqaseq_quick(distributed=__lowerCamelCase ) @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str: self.run_seqaseq_quick(distributed=__lowerCamelCase ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]: self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple" ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> str: self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple --fp16" ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=__lowerCamelCase ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: self.run_seqaseq_quick( distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , 
predict_with_generate=__lowerCamelCase ) @require_apex @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict: # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" ) @parameterized.expand(["base", "low", "high", "mixed"] ) @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : List[str] ) -> Tuple: # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout A : Dict = { # test with the default log_level - should be info and thus log info once "base": {"extra_args_str": "", "n_matches": 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1}, # test with high log_level and log_level_replica - should be quiet on all processes "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0}, } A : List[str] = experiments[experiment_id] A : Union[str, Any] = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False} A : Union[str, Any] = "Running training" with CaptureStderr() as cl: self.run_seqaseq_quick(**__lowerCamelCase , extra_args_str=data["extra_args_str"] ) A : Dict = len(re.findall(__lowerCamelCase , cl.err ) ) self.assertEqual(__lowerCamelCase , data["n_matches"] ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: A : int = self.run_trainer( eval_steps=2 , max_len=1_28 , model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=10 , distributed=__lowerCamelCase , ) # Check metrics A : str = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history A : Dict = [log for log in logs if "eval_loss" in log.keys()] A : Dict = eval_metrics[0] A : int = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase ) # test if do_predict saves generations and metrics A : Optional[Any] = os.listdir(__lowerCamelCase ) A : Any = {os.path.basename(__lowerCamelCase ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]: from transformers.training_args import OptimizerNames def train_and_return_metrics(__lowerCamelCase : str ) -> Tuple[int, float]: A : Optional[int] = "--skip_memory_metrics 0" A : str = self.run_trainer( max_len=1_28 , 
model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=1 , optim=__lowerCamelCase , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , n_gpus_to_use=1 , ) # Check metrics A : Union[str, Any] = TrainerState.load_from_json(Path(__lowerCamelCase , "trainer_state.json" ) ).log_history A : str = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 ) A : List[Any] = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 ) A : int = logs[0]["train_loss"] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss A , A , A : int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) A , A , A : Optional[int] = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) A : Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb A : Dict = gpu_peak_mem_orig + gpu_alloc_mem_orig A : Dict = gpu_peak_mem_bnb + gpu_alloc_mem_bnb A : int = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings A : Tuple = 1_20 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( __lowerCamelCase , __lowerCamelCase , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got" F""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and""" F""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , ) self.assertGreater( __lowerCamelCase , __lowerCamelCase , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got" F""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and""" F""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , ) self.assertEqual( __lowerCamelCase , __lowerCamelCase , F"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" ) def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : float = 3e-3 , __lowerCamelCase : str = "adafactor" , __lowerCamelCase : bool = False , __lowerCamelCase : str = None , __lowerCamelCase : int = 0 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : int = None , ) -> List[str]: A : Optional[int] = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro" A : Optional[int] = self.get_auto_remove_tmp_dir() A : int = F""" --model_name_or_path {model_name} --train_file {data_dir}/train.json 
--validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(__lowerCamelCase )} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(__lowerCamelCase )} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX """.split() A : Optional[Any] = F""" --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(__lowerCamelCase )} """.split() A : Optional[Any] = "\n --do_predict\n ".split() A : Optional[int] = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F"""--optim {optim}""".split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: A : Dict = get_gpu_count() A : Any = get_torch_dist_unique_port() A : Optional[Any] = F""" -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py """.split() A : Any = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__lowerCamelCase , env=self.get_env() ) else: A : List[Any] = ["run_translation.py"] + args with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ): main() return output_dir
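For orientation, the distributed branch above assembles a launch command of this shape (port, GPU count, and paths vary per run):

# python -m torch.distributed.run --nproc_per_node=2 --master_port=<unique port> \
#     <examples_dir>/pytorch/translation/run_translation.py \
#     --model_name_or_path sshleifer/student_marian_en_ro_6_1 --do_train ...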
17
1
from __future__ import annotations import requests __SCREAMING_SNAKE_CASE = set( """approved_at_utc approved_by author_flair_background_color author_flair_css_class author_flair_richtext author_flair_template_id author_fullname author_premium can_mod_post category clicked content_categories created_utc downs edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta is_original_content is_reddit_media_domain is_video link_flair_css_class link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title name permalink pwls quarantine saved score secure_media secure_media_embed selftext subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type total_awards_received ups upvote_ratio url user_reports""".split() ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase = 1 , _lowerCamelCase = "new" , _lowerCamelCase = None ): A : int = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(_lowerCamelCase ) - valid_terms ) ): A : Optional[Any] = f"""Invalid search term: {invalid_search_terms}""" raise ValueError(_lowerCamelCase ) A : List[str] = requests.get( f"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={"User-agent": "A random string"} , ) if response.status_code == 429: raise requests.HTTPError A : Any = response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(_lowerCamelCase )} A : Tuple = {} for id_ in range(_lowerCamelCase ): A : Union[str, Any] = { item: data["data"]["children"][id_]["data"][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited. Try again after some time. print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
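An illustrative call mirroring the __main__ guard above; it needs network access and is subject to Reddit's rate limits:

# get_subreddit_data("learnpython", limit=1, age="new", wanted_data=["title", "url"])
# -> {0: {"title": "...", "url": "..."}}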
17
from collections.abc import Sequence def UpperCAmelCase ( _lowerCamelCase = None ): if nums is None or not nums: raise ValueError("Input sequence should not be empty" ) A : Dict = nums[0] for i in range(1 , len(_lowerCamelCase ) ): A : Tuple = nums[i] A : List[Any] = max(_lowerCamelCase , ans + num , _lowerCamelCase ) return ans if __name__ == "__main__": import doctest doctest.testmod() # Try on a sample input from the user __SCREAMING_SNAKE_CASE = int(input("""Enter number of elements : """).strip()) __SCREAMING_SNAKE_CASE = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n] print(max_subsequence_sum(array))
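# A short trace of the recurrence above (in the un-obfuscated upstream source the
# update line reads ans = max(ans, ans + num, num)): at each element we either
# keep the best subsequence so far, extend it with the new element, or restart
# from the new element alone. For nums = [5, -10, 3]:
assert max(5, 5 + (-10), -10) == 5  # -10 is skipped, best stays 5
assert max(5, 5 + 3, 3) == 8        # the positive elements 5 and 3 sum to 8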
17
1
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]: A : Tuple = tempfile.mkdtemp() A : Tuple = BlipImageProcessor() A : Optional[Any] = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" ) A : Tuple = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert" ) A : Dict = InstructBlipProcessor(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) processor.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , **__lowerCamelCase : str ) -> int: return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).tokenizer def SCREAMING_SNAKE_CASE__ ( self : List[Any] , **__lowerCamelCase : List[Any] ) -> Optional[int]: return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).image_processor def SCREAMING_SNAKE_CASE__ ( self : List[str] , **__lowerCamelCase : Union[str, Any] ) -> Dict: return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).qformer_tokenizer def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any: shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str: A : List[Any] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] A : Union[str, Any] = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]: A : List[str] = InstructBlipProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , ) processor.save_pretrained(self.tmpdirname ) A : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) A : List[Any] = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 ) A : Optional[int] = InstructBlipProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__lowerCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCamelCase ) self.assertIsInstance(processor.qformer_tokenizer , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str: A : Any = self.get_image_processor() A : str = self.get_tokenizer() A : List[str] = self.get_qformer_tokenizer() A : List[str] = InstructBlipProcessor( tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase , qformer_tokenizer=__lowerCamelCase ) A : str = self.prepare_image_inputs() A : Optional[int] = image_processor(__lowerCamelCase , return_tensors="np" ) A : Union[str, Any] = processor(images=__lowerCamelCase , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Tuple: A : List[str] = 
self.get_image_processor() A : Tuple = self.get_tokenizer() A : str = self.get_qformer_tokenizer() A : List[str] = InstructBlipProcessor( tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase , qformer_tokenizer=__lowerCamelCase ) A : Union[str, Any] = "lower newer" A : List[str] = processor(text=__lowerCamelCase ) A : Union[str, Any] = tokenizer(__lowerCamelCase , return_token_type_ids=__lowerCamelCase ) A : List[str] = qformer_tokenizer(__lowerCamelCase , return_token_type_ids=__lowerCamelCase ) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key] , encoded_processor[key] ) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["qformer_" + key] ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Tuple: A : List[str] = self.get_image_processor() A : List[str] = self.get_tokenizer() A : Optional[Any] = self.get_qformer_tokenizer() A : Optional[int] = InstructBlipProcessor( tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase , qformer_tokenizer=__lowerCamelCase ) A : Optional[Any] = "lower newer" A : Dict = self.prepare_image_inputs() A : Dict = processor(text=__lowerCamelCase , images=__lowerCamelCase ) self.assertListEqual( list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , ) # test if it raises when no input is passed with pytest.raises(__lowerCamelCase ): processor() def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int: A : Any = self.get_image_processor() A : int = self.get_tokenizer() A : Any = self.get_qformer_tokenizer() A : Optional[int] = InstructBlipProcessor( tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase , qformer_tokenizer=__lowerCamelCase ) A : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A : str = processor.batch_decode(__lowerCamelCase ) A : Tuple = tokenizer.batch_decode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Tuple: A : Union[str, Any] = self.get_image_processor() A : int = self.get_tokenizer() A : Any = self.get_qformer_tokenizer() A : str = InstructBlipProcessor( tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase , qformer_tokenizer=__lowerCamelCase ) A : Optional[int] = "lower newer" A : int = self.prepare_image_inputs() A : Tuple = processor(text=__lowerCamelCase , images=__lowerCamelCase ) self.assertListEqual( list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
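# A minimal usage sketch assembled from the same tiny test checkpoints the tests
# above rely on; the prompt text is an arbitrary assumption and real-name classes
# (GPT2Tokenizer rather than the obfuscated GPTaTokenizer) are assumed from
# upstream transformers.
import numpy as np
from PIL import Image
from transformers import BertTokenizerFast, BlipImageProcessor, GPT2Tokenizer, InstructBlipProcessor

processor = InstructBlipProcessor(
    image_processor=BlipImageProcessor(),
    tokenizer=GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model"),
    qformer_tokenizer=BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert"),
)
image = Image.fromarray(np.random.randint(255, size=(30, 400, 3), dtype=np.uint8))
inputs = processor(text="describe the image", images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # attention_mask, input_ids, pixel_values, qformer_*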
17
from math import sqrt def UpperCAmelCase ( _lowerCamelCase = 100_0000 ): A : int = 0 A : int = 0 A : int while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(_lowerCamelCase , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(F"""{solution() = }""")
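# Background for the solver above (Project Euler 86): for a cuboid a x b x c with
# a <= b <= c, the shortest surface path between opposite corners unfolds onto a
# plane as the straight line sqrt((a + b)**2 + c**2), which the loop checks via
# sum_shortest_sides = a + b. A tiny sanity check on the classic 6x5x3 cuboid:
from math import sqrt

assert sqrt((3 + 5) ** 2 + 6**2) == 10.0  # integer shortest path of length 10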
17
1
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = "blenderbot-small" a__ = ["past_key_values"] a__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : int , __lowerCamelCase : Any=5_02_65 , __lowerCamelCase : List[str]=5_12 , __lowerCamelCase : Any=8 , __lowerCamelCase : Optional[Any]=20_48 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : Union[str, Any]=8 , __lowerCamelCase : Any=20_48 , __lowerCamelCase : Tuple=16 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : str=0.0 , __lowerCamelCase : Dict=True , __lowerCamelCase : Dict=True , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Optional[Any]=5_12 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : Tuple=0.02 , __lowerCamelCase : Any=1 , __lowerCamelCase : List[str]=False , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : Union[str, Any]=1 , __lowerCamelCase : int=2 , __lowerCamelCase : Optional[Any]=2 , **__lowerCamelCase : Dict , ) -> Tuple: A : Dict = vocab_size A : List[str] = max_position_embeddings A : List[str] = d_model A : Any = encoder_ffn_dim A : List[Any] = encoder_layers A : Dict = encoder_attention_heads A : Optional[int] = decoder_ffn_dim A : Any = decoder_layers A : Dict = decoder_attention_heads A : List[Any] = dropout A : Dict = attention_dropout A : str = activation_dropout A : List[Any] = activation_function A : Any = init_std A : Optional[Any] = encoder_layerdrop A : Dict = decoder_layerdrop A : Tuple = use_cache A : Union[str, Any] = encoder_layers A : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , is_encoder_decoder=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , forced_eos_token_id=__lowerCamelCase , **__lowerCamelCase , ) class lowerCamelCase_ ( _A ): '''simple docstring''' @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: A : Any = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: A : int = {0: "batch"} A : List[str] = {0: "batch", 1: "past_decoder_sequence + sequence"} else: A : Dict = {0: "batch", 1: "decoder_sequence"} A : Tuple = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. 
A : Dict = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: A , A : Union[str, Any] = self.num_layers for i in range(__lowerCamelCase ): A : Optional[int] = {0: "batch", 2: "past_sequence + sequence"} A : Any = {0: "batch", 2: "past_sequence + sequence"} else: A : Dict = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: A : Optional[int] = super().outputs else: A : Tuple = super(__lowerCamelCase , self ).outputs if self.use_past: A , A : Any = self.num_layers for i in range(__lowerCamelCase ): A : List[Any] = {0: "batch", 2: "past_sequence + sequence"} A : List[str] = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]: A : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Generate decoder inputs A : List[Any] = seq_length if not self.use_past else 1 A : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) A : List[str] = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} A : Optional[Any] = dict(**__lowerCamelCase , **__lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch A , A : Tuple = common_inputs["input_ids"].shape A : str = common_inputs["decoder_input_ids"].shape[1] A , A : Optional[Any] = self.num_attention_heads A : int = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) A : Optional[int] = decoder_seq_length + 3 A : Optional[int] = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) A : Dict = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase )] , dim=1 ) A : Union[str, Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered A , A : Union[str, Any] = self.num_layers A : str = min(__lowerCamelCase , __lowerCamelCase ) A : Optional[int] = max(__lowerCamelCase , __lowerCamelCase ) - min_num_layers A : Optional[int] = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(__lowerCamelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), ) ) # TODO: test this. 
A : Optional[Any] = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(__lowerCamelCase , __lowerCamelCase ): common_inputs["past_key_values"].append((torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) ) return common_inputs def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]: A : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch A , A : Optional[Any] = common_inputs["input_ids"].shape # Not using the same length for past_key_values A : Dict = seqlen + 2 A , A : Optional[int] = self.num_layers A , A : List[str] = self.num_attention_heads A : Tuple = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) A : Union[str, Any] = common_inputs["attention_mask"].dtype A : Tuple = torch.cat( [common_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 ) A : str = [ (torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(__lowerCamelCase ) ] return common_inputs def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX A : Optional[Any] = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX A : Any = tokenizer.num_special_tokens_to_add(__lowerCamelCase ) A : str = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCamelCase ) # Generate dummy inputs according to compute batch and sequence A : List[str] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size A : Any = dict(tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase ) ) return common_inputs def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: A : List[Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) elif self.task == "causal-lm": A : Union[str, Any] = self._generate_dummy_inputs_for_causal_lm( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) else: A : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) return common_inputs def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] ) -> Union[str, Any]: if self.task in ["default", "seq2seq-lm"]: A : Optional[Any] = super()._flatten_past_key_values_(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) else: A : Optional[Any] = super(__lowerCamelCase , self )._flatten_past_key_values_( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
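# A hedged sketch of exercising the dummy-input generation defined above; the
# real (un-obfuscated) class names BlenderbotSmallConfig / BlenderbotSmallOnnxConfig
# are assumed from upstream transformers, where this code originates, and the
# transformers.onnx API has shifted across versions.
from transformers import AutoTokenizer
from transformers.models.blenderbot_small.configuration_blenderbot_small import (
    BlenderbotSmallConfig,
    BlenderbotSmallOnnxConfig,
)

tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
onnx_config = BlenderbotSmallOnnxConfig(BlenderbotSmallConfig(), task="default")
dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=None)
print(sorted(dummy.keys()))  # input_ids, attention_mask, decoder_* counterparts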
17
import os # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_doctest_list.py __SCREAMING_SNAKE_CASE = """.""" if __name__ == "__main__": __SCREAMING_SNAKE_CASE = os.path.join(REPO_PATH, """utils/documentation_tests.txt""") __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] with open(doctest_file_path) as fp: for line in fp: __SCREAMING_SNAKE_CASE = line.strip() __SCREAMING_SNAKE_CASE = os.path.join(REPO_PATH, line) if not (os.path.isfile(path) or os.path.isdir(path)): non_existent_paths.append(line) all_paths.append(path) if len(non_existent_paths) > 0: __SCREAMING_SNAKE_CASE = """\n""".join(non_existent_paths) raise ValueError(F"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""") if all_paths != sorted(all_paths): raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""")
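# A small companion sketch (an assumption, not part of the check above): rewrite
# utils/documentation_tests.txt in sorted order instead of merely failing when
# the entries are out of alphabetical order. Run it from the repo root.
from pathlib import Path

doctest_file = Path("utils/documentation_tests.txt")
lines = sorted(line for line in doctest_file.read_text().splitlines() if line.strip())
doctest_file.write_text("\n".join(lines) + "\n")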
17
1
import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class lowerCamelCase_ : '''simple docstring''' def __init__( self : Tuple , __lowerCamelCase : Any=2 , __lowerCamelCase : str=3 , __lowerCamelCase : Union[str, Any]=64 , __lowerCamelCase : Optional[int]=None ) -> Optional[Any]: A : List[Any] = np.random.default_rng(__lowerCamelCase ) A : str = length A : List[Any] = rng.normal(size=(length,) ).astype(np.floataa ) A : Tuple = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self : Tuple ) -> List[Any]: return self.length def __getitem__( self : Any , __lowerCamelCase : List[str] ) -> Optional[Any]: return {"x": self.x[i], "y": self.y[i]} class lowerCamelCase_ ( torch.nn.Module ): '''simple docstring''' def __init__( self : Tuple , __lowerCamelCase : List[Any]=0 , __lowerCamelCase : Dict=0 , __lowerCamelCase : List[str]=False ) -> Optional[Any]: super().__init__() A : Union[str, Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) A : List[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) A : int = True def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Union[str, Any]=None ) -> int: if self.first_batch: print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" ) A : Tuple = False return x * self.a[0] + self.b[0] class lowerCamelCase_ ( torch.nn.Module ): '''simple docstring''' def __init__( self : Any , __lowerCamelCase : Tuple=0 , __lowerCamelCase : Tuple=0 , __lowerCamelCase : Dict=False ) -> Tuple: super().__init__() A : Tuple = torch.nn.Parameter(torch.tensor(__lowerCamelCase ).float() ) A : Optional[int] = torch.nn.Parameter(torch.tensor(__lowerCamelCase ).float() ) A : Union[str, Any] = True def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : Union[str, Any]=None ) -> Union[str, Any]: if self.first_batch: print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" ) A : List[str] = False return x * self.a + self.b def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase = 16 ): from datasets import load_dataset from transformers import AutoTokenizer A : int = AutoTokenizer.from_pretrained("bert-base-cased" ) A : Any = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"} A : Tuple = load_dataset("csv" , data_files=_lowerCamelCase ) A : Any = datasets["train"].unique("label" ) A : List[str] = {v: i for i, v in enumerate(_lowerCamelCase )} def tokenize_function(_lowerCamelCase ): # max_length=None => use the model max length (it's actually the default) A : Optional[Any] = tokenizer( examples["sentence1"] , examples["sentence2"] , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" ) if "label" in examples: A : int = [label_to_id[l] for l in examples["label"]] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset A : List[str] = datasets.map( _lowerCamelCase , batched=_lowerCamelCase , remove_columns=["sentence1", "sentence2", "label"] , ) def collate_fn(_lowerCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(_lowerCamelCase , padding="max_length" , max_length=128 , return_tensors="pt" ) return tokenizer.pad(_lowerCamelCase , padding="longest" , return_tensors="pt" ) # Instantiate dataloaders. 
A : Tuple = DataLoader(tokenized_datasets["train"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=2 ) A : Tuple = DataLoader(tokenized_datasets["validation"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=1 ) return train_dataloader, eval_dataloader
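# A minimal usage sketch for the dataloader helper above (named get_dataloaders
# in upstream accelerate tests, obfuscated to UpperCAmelCase here); handing the
# loaders to Accelerator.prepare, as assumed below, is the usual next step.
from accelerate import Accelerator

accelerator = Accelerator()
train_dataloader, eval_dataloader = UpperCAmelCase(accelerator)
train_dataloader, eval_dataloader = accelerator.prepare(train_dataloader, eval_dataloader)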
17
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class lowerCamelCase_ : '''simple docstring''' def __init__( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Tuple=13 , __lowerCamelCase : List[str]=30 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : Any=3 , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : int=32 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Any=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=2 , ) -> str: A : List[Any] = parent A : Optional[int] = batch_size A : Any = image_size A : Optional[Any] = patch_size A : Optional[Any] = num_channels A : Tuple = is_training A : Optional[Any] = use_labels A : Union[str, Any] = hidden_size A : Tuple = num_hidden_layers A : Union[str, Any] = num_attention_heads A : Union[str, Any] = intermediate_size A : Any = hidden_act A : Tuple = hidden_dropout_prob A : Dict = attention_probs_dropout_prob A : Any = type_sequence_label_size A : Tuple = initializer_range A : List[Any] = scope A : Optional[int] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) A : List[str] = (image_size // patch_size) ** 2 A : List[str] = num_patches + 2 def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int: A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : List[Any] = None if self.use_labels: A : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Dict = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str] ) -> int: A : Optional[int]
= DeiTModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : List[Any] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Any ) -> Any: A : List[Any] = DeiTForMaskedImageModeling(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A : List[str] = 1 A : Optional[int] = DeiTForMaskedImageModeling(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : int ) -> Dict: A : str = self.type_sequence_label_size A : List[str] = DeiTForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A : Any = 1 A : str = DeiTForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: A : Dict = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ) : Tuple = config_and_inputs A : int = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) a__ = ( { "feature-extraction": DeiTModel, "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]: A : str = DeiTModelTester(self ) A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict: self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int: pass def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: A , A : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Dict = model_class(__lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]: A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Union[str, Any] 
= model_class(__lowerCamelCase ) A : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Any = [*signature.parameters.keys()] A : Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any: A : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any]=False ) -> str: A : Union[str, Any] = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: if not self.model_tester.is_training: return A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() A : Dict = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(__lowerCamelCase ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue A : Union[str, Any] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Dict = model(**__lowerCamelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return A : Tuple = False A : Any = True for model_class in self.all_model_classes: if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue A : List[str] = model_class(__lowerCamelCase ) model.gradient_checkpointing_enable() model.to(__lowerCamelCase ) model.train() A : Dict = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Tuple = model(**__lowerCamelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() A : int = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(__lowerCamelCase ), *get_values(__lowerCamelCase ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ): A : Tuple = problem_type["title"] A : Optional[Any] = 
problem_type["num_labels"] A : List[str] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if problem_type["num_labels"] > 1: A : List[str] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) A : int = inputs["labels"].to(problem_type["dtype"] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=__lowerCamelCase ) as warning_list: A : Optional[Any] = model(**__lowerCamelCase ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Optional[Any] = DeiTModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: A : Optional[int] = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to( __lowerCamelCase ) A : List[Any] = self.default_image_processor A : List[Any] = prepare_img() A : Optional[Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): A : List[str] = model(**__lowerCamelCase ) # verify the logits A : str = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : List[str] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: A : str = DeiTModel.from_pretrained( "facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" ) A : Dict = self.default_image_processor A : Optional[int] = prepare_img() A : Union[str, Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ) A : Union[str, Any] = inputs.pixel_values.to(__lowerCamelCase ) # forward pass to make sure inference works in fp16 with torch.no_grad(): A : List[str] = model(__lowerCamelCase )
17
1
from __future__ import annotations def UpperCAmelCase ( _lowerCamelCase ): # preprocessing the first row for i in range(1 , len(matrix[0] ) ): matrix[0][i] += matrix[0][i - 1] # preprocessing the first column for i in range(1 , len(_lowerCamelCase ) ): matrix[i][0] += matrix[i - 1][0] # updating the path cost for current position for i in range(1 , len(_lowerCamelCase ) ): for j in range(1 , len(matrix[0] ) ): matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] ) return matrix[-1][-1] if __name__ == "__main__": import doctest doctest.testmod()
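# A worked example for the DP above: each cell accumulates the cheapest cost of
# reaching it while moving only right or down, so the answer lands in
# matrix[-1][-1]. The grid values below are an arbitrary toy input.
grid = [
    [1, 3, 1],
    [1, 5, 1],
    [4, 2, 1],
]
assert UpperCAmelCase(grid) == 7  # cheapest path: 1 -> 3 -> 1 -> 1 -> 1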
17
from sklearn.metrics import recall_score import datasets __SCREAMING_SNAKE_CASE = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ __SCREAMING_SNAKE_CASE = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`. - **zero_division** (`string` or `int`): Sets the value to return when there is a zero division. Defaults to `'warn'`. - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ __SCREAMING_SNAKE_CASE = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("int32" ) ), "references": datasets.Sequence(datasets.Value("int32" ) ), } if self.config_name == "multilabel" else { "predictions": datasets.Value("int32" ), "references": datasets.Value("int32" ), } ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , ) def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Tuple="binary" , __lowerCamelCase : Tuple=None , __lowerCamelCase : Tuple="warn" , ) -> Optional[Any]: A : str = recall_score( __lowerCamelCase , __lowerCamelCase , labels=__lowerCamelCase , pos_label=__lowerCamelCase , average=__lowerCamelCase , sample_weight=__lowerCamelCase , zero_division=__lowerCamelCase , ) return {"recall": float(__lowerCamelCase ) if score.size == 1 else score}
17
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) __SCREAMING_SNAKE_CASE = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = ["""LayoutXLMTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = ["""LayoutXLMTokenizerFast"""] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys __SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
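# A hedged illustration of what the _LazyModule wiring above provides: the
# attribute access below triggers the real import only on first use, and it
# resolves only when the relevant optional dependencies are installed.
import transformers

processor_cls = transformers.models.layoutxlm.LayoutXLMProcessor  # imported lazily here
print(processor_cls.__name__)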
17
from collections import deque from .hash_table import HashTable class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : Union[str, Any] , *__lowerCamelCase : Dict , **__lowerCamelCase : int ) -> Optional[int]: super().__init__(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Union[str, Any]: A : Optional[Any] = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(__lowerCamelCase ) A : Dict = self.values[key] def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: return ( sum(self.charge_factor - len(__lowerCamelCase ) for slot in self.values ) / self.size_table * self.charge_factor ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : List[Any]=None ) -> Optional[int]: if not ( len(self.values[key] ) == self.charge_factor and self.values.count(__lowerCamelCase ) == 0 ): return key return super()._collision_resolution(__lowerCamelCase , __lowerCamelCase )
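# A self-contained sketch of the separate-chaining idea the subclass above
# implements: each slot holds a deque and new values go to the front. The slot
# count and hash rule are arbitrary assumptions for illustration.
from collections import deque

slots: dict[int, deque] = {}


def insert(key: int, value: int, size_table: int = 8) -> None:
    slot = key % size_table
    slots.setdefault(slot, deque()).appendleft(value)


insert(3, 10)
insert(11, 20)  # collides with key 3 at slot 3
print(slots[3])  # deque([20, 10])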
17
1
import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def UpperCAmelCase ( _lowerCamelCase ): monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() ) @pytest.fixture def UpperCAmelCase ( _lowerCamelCase ): class lowerCamelCase_ : '''simple docstring''' def __init__( self : List[Any] , __lowerCamelCase : Dict ) -> Optional[Any]: A : Optional[int] = metric_id class lowerCamelCase_ : '''simple docstring''' a__ = [MetricMock(_A ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]] def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Tuple: return self._metrics monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() ) @pytest.mark.parametrize( "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if "tmp_path" in args: A : Optional[Any] = tuple(arg if arg != "tmp_path" else tmp_path for arg in args ) with pytest.warns(_lowerCamelCase , match="https://huggingface.co/docs/evaluate" ): func(*_lowerCamelCase )
17
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class lowerCamelCase_ : '''simple docstring''' @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str: return self.get_dummy_input() @property def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : int=False , __lowerCamelCase : int=False , __lowerCamelCase : Optional[int]=False , ) -> Dict: A : Optional[Any] = 4 A : List[str] = 32 A : Any = (32, 32) A : str = torch.manual_seed(0 ) A : int = torch.device(__lowerCamelCase ) A : List[str] = (batch_size, num_channels) + sizes A : Dict = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase ) A : int = {"hidden_states": hidden_states} if include_temb: A : Any = 1_28 A : List[str] = randn_tensor((batch_size, temb_channels) , generator=__lowerCamelCase , device=__lowerCamelCase ) if include_res_hidden_states_tuple: A : str = torch.manual_seed(1 ) A : Tuple = (randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase ),) if include_encoder_hidden_states: A : Dict = floats_tensor((batch_size, 32, 32) ).to(__lowerCamelCase ) if include_skip_sample: A : Optional[int] = randn_tensor(((batch_size, 3) + sizes) , generator=__lowerCamelCase , device=__lowerCamelCase ) return dummy_input def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]: A : Dict = { "in_channels": 32, "out_channels": 32, "temb_channels": 1_28, } if self.block_type == "up": A : Dict = 32 if self.block_type == "mid": init_dict.pop("out_channels" ) A : str = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Optional[int] ) -> Union[str, Any]: A , A : str = self.prepare_init_args_and_inputs_for_common() A : List[Any] = self.block_class(**__lowerCamelCase ) unet_block.to(__lowerCamelCase ) unet_block.eval() with torch.no_grad(): A : int = unet_block(**__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): A : Union[str, Any] = output[0] self.assertEqual(output.shape , self.output_shape ) A : Any = output[0, -1, -3:, -3:] A : Union[str, Any] = torch.tensor(__lowerCamelCase ).to(__lowerCamelCase ) assert torch_all_close(output_slice.flatten() , __lowerCamelCase , atol=5e-3 ) @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: A , A : Tuple = self.prepare_init_args_and_inputs_for_common() A : str = self.block_class(**__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : Optional[int] = model(**__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): A : Optional[Any] = output[0] A : List[str] = torch.device(__lowerCamelCase ) A : List[str] = randn_tensor(output.shape , device=__lowerCamelCase ) A : Dict = torch.nn.functional.mse_loss(__lowerCamelCase , __lowerCamelCase ) loss.backward()
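# The tests above pin randomness through an explicit torch generator; a tiny
# standalone check of why that makes the dummy inputs reproducible:
import torch
from diffusers.utils import randn_tensor

a = randn_tensor((2, 3), generator=torch.manual_seed(0))
b = randn_tensor((2, 3), generator=torch.manual_seed(0))
assert torch.equal(a, b)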
17
1
import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __SCREAMING_SNAKE_CASE = sys.version_info >= (3, 10) def UpperCAmelCase ( _lowerCamelCase=None , _lowerCamelCase=None ): return field(default_factory=lambda: default , metadata=_lowerCamelCase ) @dataclass class lowerCamelCase_ : '''simple docstring''' a__ = 42 a__ = 42 a__ = 42 a__ = 42 @dataclass class lowerCamelCase_ : '''simple docstring''' a__ = 42 a__ = field(default="toto" ,metadata={"help": "help message"} ) @dataclass class lowerCamelCase_ : '''simple docstring''' a__ = False a__ = True a__ = None class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = "titi" a__ = "toto" class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = "titi" a__ = "toto" a__ = 42 @dataclass class lowerCamelCase_ : '''simple docstring''' a__ = "toto" def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any: A : Optional[int] = BasicEnum(self.foo ) @dataclass class lowerCamelCase_ : '''simple docstring''' a__ = "toto" def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]: A : Union[str, Any] = MixedTypeEnum(self.foo ) @dataclass class lowerCamelCase_ : '''simple docstring''' a__ = None a__ = field(default=_A ,metadata={"help": "help message"} ) a__ = None a__ = list_field(default=[] ) a__ = list_field(default=[] ) @dataclass class lowerCamelCase_ : '''simple docstring''' a__ = list_field(default=[] ) a__ = list_field(default=[1, 2, 3] ) a__ = list_field(default=["Hallo", "Bonjour", "Hello"] ) a__ = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class lowerCamelCase_ : '''simple docstring''' a__ = field() a__ = field() a__ = field() def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: A : List[str] = BasicEnum(self.required_enum ) @dataclass class lowerCamelCase_ : '''simple docstring''' a__ = 42 a__ = field() a__ = None a__ = field(default="toto" ,metadata={"help": "help message"} ) a__ = list_field(default=["Hallo", "Bonjour", "Hello"] ) if is_python_no_less_than_3_10: @dataclass class lowerCamelCase_ : '''simple docstring''' a__ = False a__ = True a__ = None @dataclass class lowerCamelCase_ : '''simple docstring''' a__ = None a__ = field(default=_A ,metadata={"help": "help message"} ) a__ = None a__ = list_field(default=[] ) a__ = list_field(default=[] ) class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : argparse.ArgumentParser , __lowerCamelCase : argparse.ArgumentParser ) -> Optional[Any]: self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): A : Union[str, Any] = {k: v for k, v in vars(__lowerCamelCase ).items() if k != "container"} A : str = {k: v for k, v in vars(__lowerCamelCase ).items() if k != "container"} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("choices" , __lowerCamelCase ) and yy.get("choices" , __lowerCamelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["type"](__lowerCamelCase ) , yy["type"](__lowerCamelCase ) ) del xx["type"], yy["type"] 
self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]: A : List[Any] = HfArgumentParser(__lowerCamelCase ) A : List[Any] = argparse.ArgumentParser() expected.add_argument("--foo" , type=__lowerCamelCase , required=__lowerCamelCase ) expected.add_argument("--bar" , type=__lowerCamelCase , required=__lowerCamelCase ) expected.add_argument("--baz" , type=__lowerCamelCase , required=__lowerCamelCase ) expected.add_argument("--flag" , type=__lowerCamelCase , default=__lowerCamelCase , const=__lowerCamelCase , nargs="?" ) self.argparsersEqual(__lowerCamelCase , __lowerCamelCase ) A : Dict = ["--foo", "1", "--baz", "quux", "--bar", "0.5"] ((A) , ) : Optional[Any] = parser.parse_args_into_dataclasses(__lowerCamelCase , look_for_args_file=__lowerCamelCase ) self.assertFalse(example.flag ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Tuple: A : str = HfArgumentParser(__lowerCamelCase ) A : Any = argparse.ArgumentParser() expected.add_argument("--foo" , default=42 , type=__lowerCamelCase ) expected.add_argument("--baz" , default="toto" , type=__lowerCamelCase , help="help message" ) self.argparsersEqual(__lowerCamelCase , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple: A : Any = argparse.ArgumentParser() expected.add_argument("--foo" , type=__lowerCamelCase , default=__lowerCamelCase , const=__lowerCamelCase , nargs="?" ) expected.add_argument("--baz" , type=__lowerCamelCase , default=__lowerCamelCase , const=__lowerCamelCase , nargs="?" ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("--no_baz" , action="store_false" , default=__lowerCamelCase , dest="baz" ) expected.add_argument("--opt" , type=__lowerCamelCase , default=__lowerCamelCase ) A : Union[str, Any] = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(__lowerCamelCase ) for dataclass_type in dataclass_types: A : int = HfArgumentParser(__lowerCamelCase ) self.argparsersEqual(__lowerCamelCase , __lowerCamelCase ) A : Union[str, Any] = parser.parse_args([] ) self.assertEqual(__lowerCamelCase , Namespace(foo=__lowerCamelCase , baz=__lowerCamelCase , opt=__lowerCamelCase ) ) A : List[str] = parser.parse_args(["--foo", "--no_baz"] ) self.assertEqual(__lowerCamelCase , Namespace(foo=__lowerCamelCase , baz=__lowerCamelCase , opt=__lowerCamelCase ) ) A : Any = parser.parse_args(["--foo", "--baz"] ) self.assertEqual(__lowerCamelCase , Namespace(foo=__lowerCamelCase , baz=__lowerCamelCase , opt=__lowerCamelCase ) ) A : List[Any] = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] ) self.assertEqual(__lowerCamelCase , Namespace(foo=__lowerCamelCase , baz=__lowerCamelCase , opt=__lowerCamelCase ) ) A : Tuple = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] ) self.assertEqual(__lowerCamelCase , Namespace(foo=__lowerCamelCase , baz=__lowerCamelCase , opt=__lowerCamelCase ) ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[Any]: A : List[str] = HfArgumentParser(__lowerCamelCase ) A : Any = argparse.ArgumentParser() expected.add_argument( "--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , ) self.argparsersEqual(__lowerCamelCase , __lowerCamelCase ) A : Union[str, Any] = parser.parse_args([] ) self.assertEqual(args.foo , "toto" ) A : Tuple = parser.parse_args_into_dataclasses([] )[0] 
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) A : Optional[Any] = parser.parse_args(["--foo", "titi"] ) self.assertEqual(args.foo , "titi" ) A : str = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) A : Optional[int] = parser.parse_args(["--foo", "42"] ) self.assertEqual(args.foo , 42 ) A : Optional[int] = parser.parse_args_into_dataclasses(["--foo", "42"] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Tuple: @dataclass class lowerCamelCase_ : '''simple docstring''' a__ = "toto" A : List[str] = HfArgumentParser(__lowerCamelCase ) A : List[Any] = argparse.ArgumentParser() expected.add_argument( "--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , ) self.argparsersEqual(__lowerCamelCase , __lowerCamelCase ) A : Optional[int] = parser.parse_args([] ) self.assertEqual(args.foo , "toto" ) A : Tuple = parser.parse_args(["--foo", "titi"] ) self.assertEqual(args.foo , "titi" ) A : int = parser.parse_args(["--foo", "42"] ) self.assertEqual(args.foo , 42 ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple: A : List[Any] = HfArgumentParser(__lowerCamelCase ) A : List[str] = argparse.ArgumentParser() expected.add_argument("--foo_int" , nargs="+" , default=[] , type=__lowerCamelCase ) expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=__lowerCamelCase ) expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__lowerCamelCase ) expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=__lowerCamelCase ) self.argparsersEqual(__lowerCamelCase , __lowerCamelCase ) A : Tuple = parser.parse_args([] ) self.assertEqual( __lowerCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , ) A : Union[str, Any] = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() ) self.assertEqual(__lowerCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: A : Any = argparse.ArgumentParser() expected.add_argument("--foo" , default=__lowerCamelCase , type=__lowerCamelCase ) expected.add_argument("--bar" , default=__lowerCamelCase , type=__lowerCamelCase , help="help message" ) expected.add_argument("--baz" , default=__lowerCamelCase , type=__lowerCamelCase ) expected.add_argument("--ces" , nargs="+" , default=[] , type=__lowerCamelCase ) expected.add_argument("--des" , nargs="+" , default=[] , type=__lowerCamelCase ) A : List[str] = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(__lowerCamelCase ) for dataclass_type in dataclass_types: A : Optional[Any] = HfArgumentParser(__lowerCamelCase ) self.argparsersEqual(__lowerCamelCase , __lowerCamelCase ) A : List[Any] = parser.parse_args([] ) self.assertEqual(__lowerCamelCase , Namespace(foo=__lowerCamelCase , bar=__lowerCamelCase , baz=__lowerCamelCase , ces=[] , des=[] ) ) A : Dict = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() ) self.assertEqual(__lowerCamelCase , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]: A : Dict = HfArgumentParser(__lowerCamelCase ) A : Optional[Any] = argparse.ArgumentParser() 
expected.add_argument("--required_list" , nargs="+" , type=__lowerCamelCase , required=__lowerCamelCase ) expected.add_argument("--required_str" , type=__lowerCamelCase , required=__lowerCamelCase ) expected.add_argument( "--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__lowerCamelCase , ) self.argparsersEqual(__lowerCamelCase , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str: A : List[str] = HfArgumentParser(__lowerCamelCase ) A : List[Any] = argparse.ArgumentParser() expected.add_argument("--foo" , type=__lowerCamelCase , required=__lowerCamelCase ) expected.add_argument( "--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__lowerCamelCase , ) expected.add_argument("--opt" , type=__lowerCamelCase , default=__lowerCamelCase ) expected.add_argument("--baz" , default="toto" , type=__lowerCamelCase , help="help message" ) expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__lowerCamelCase ) self.argparsersEqual(__lowerCamelCase , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]: A : Optional[Any] = HfArgumentParser(__lowerCamelCase ) A : int = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } A : List[Any] = parser.parse_dict(__lowerCamelCase )[0] A : int = BasicExample(**__lowerCamelCase ) self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[Any]: A : List[Any] = HfArgumentParser(__lowerCamelCase ) A : str = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, "extra": 42, } self.assertRaises(__lowerCamelCase , parser.parse_dict , __lowerCamelCase , allow_extra_keys=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]: A : Optional[int] = HfArgumentParser(__lowerCamelCase ) A : List[str] = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } with tempfile.TemporaryDirectory() as tmp_dir: A : List[Any] = os.path.join(__lowerCamelCase , "temp_json" ) os.mkdir(__lowerCamelCase ) with open(temp_local_path + ".json" , "w+" ) as f: json.dump(__lowerCamelCase , __lowerCamelCase ) A : Tuple = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0] A : List[Any] = BasicExample(**__lowerCamelCase ) self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Tuple: A : Any = HfArgumentParser(__lowerCamelCase ) A : Optional[Any] = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } with tempfile.TemporaryDirectory() as tmp_dir: A : List[Any] = os.path.join(__lowerCamelCase , "temp_yaml" ) os.mkdir(__lowerCamelCase ) with open(temp_local_path + ".yaml" , "w+" ) as f: yaml.dump(__lowerCamelCase , __lowerCamelCase ) A : Any = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0] A : Tuple = BasicExample(**__lowerCamelCase ) self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> int: A : Any = HfArgumentParser(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase )
import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs (`Dict[str, Any]`, *optional*): Additional stopping criteria specific kwargs. Return: `bool`. `False` indicates we should continue, `True` indicates we should stop. """ class lowerCamelCase_ ( _A ): '''simple docstring''' @add_start_docstrings(__lowerCamelCase ) def __call__( self : Optional[int] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Optional[Any] ) -> bool: raise NotImplementedError("StoppingCriteria needs to be subclassed" ) class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] = None ) -> List[Any]: A : str = max_length A : Optional[int] = max_position_embeddings @add_start_docstrings(__lowerCamelCase ) def __call__( self : List[str] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Any ) -> bool: A : List[Any] = input_ids.shape[-1] A : Any = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( "This is a friendly reminder - the current text generation call will exceed the model's predefined " F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """ "exceptions, performance degradation, or nothing at all." ) return is_done class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : int ) -> List[Any]: warnings.warn( "The class `MaxNewTokensCriteria` is deprecated. " F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """ "with `max_length = start_length + max_new_tokens` instead." 
, __lowerCamelCase , ) A : str = start_length A : Optional[Any] = max_new_tokens A : Dict = start_length + max_new_tokens @add_start_docstrings(__lowerCamelCase ) def __call__( self : int , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool: return input_ids.shape[-1] >= self.max_length class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : Optional[int] , __lowerCamelCase : float , __lowerCamelCase : Optional[float] = None ) -> List[Any]: A : str = max_time A : Dict = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(__lowerCamelCase ) def __call__( self : Any , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool: return time.time() - self.initial_timestamp > self.max_time class lowerCamelCase_ ( _A ): '''simple docstring''' @add_start_docstrings(__lowerCamelCase ) def __call__( self : Union[str, Any] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : int ) -> bool: return any(criteria(__lowerCamelCase , __lowerCamelCase ) for criteria in self ) @property def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]: for stopping_criterium in self: if isinstance(__lowerCamelCase , __lowerCamelCase ): return stopping_criterium.max_length elif isinstance(__lowerCamelCase , __lowerCamelCase ): return stopping_criterium.max_length return None def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : Optional[int] = stopping_criteria.max_length A : Any = deepcopy(_lowerCamelCase ) if stopping_max_length is not None and stopping_max_length != max_length: warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , _lowerCamelCase ) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=_lowerCamelCase ) ) return new_stopping_criteria
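# Hedged usage sketch (kept as a comment, since this module uses relative
# imports and its class names are mangled; the names below are the upstream
# `transformers` exports, and "gpt2" is just an illustrative checkpoint):
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   from transformers import MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList
#
#   tokenizer = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   inputs = tokenizer("The quick brown fox", return_tensors="pt")
#   # Generation stops as soon as *any* criterion in the list fires (see the
#   # list's __call__ above): 32 total tokens or 2 seconds of wall time.
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=32), MaxTimeCriteria(max_time=2.0)])
#   output = model.generate(**inputs, stopping_criteria=criteria)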
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation import warnings from .state import AcceleratorState, GradientState warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""") class lowerCamelCase_ : '''simple docstring''' def __init__( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : bool = True , __lowerCamelCase : bool = False ) -> List[str]: A : List[Any] = scheduler A : int = optimizers if isinstance(__lowerCamelCase , (list, tuple) ) else [optimizers] A : Any = split_batches A : List[str] = step_with_optimizer A : Tuple = GradientState() def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , *__lowerCamelCase : int , **__lowerCamelCase : str ) -> List[Any]: if not self.step_with_optimizer: # No link between scheduler and optimizer -> just step self.scheduler.step(*__lowerCamelCase , **__lowerCamelCase ) return # Otherwise, first make sure the optimizer was stepped. if not self.gradient_state.sync_gradients: if self.gradient_state.adjust_scheduler: self.scheduler._step_count += 1 return for opt in self.optimizers: if opt.step_was_skipped: return if self.split_batches: # Split batches -> the training dataloader batch size is not changed so one step per training step self.scheduler.step(*__lowerCamelCase , **__lowerCamelCase ) else: # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do # num_processes steps per training step A : str = AcceleratorState().num_processes for _ in range(__lowerCamelCase ): # Special case when using OneCycle and `drop_last` was not used if hasattr(self.scheduler , "total_steps" ): if self.scheduler._step_count <= self.scheduler.total_steps: self.scheduler.step(*__lowerCamelCase , **__lowerCamelCase ) else: self.scheduler.step(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any: return self.scheduler.get_last_lr() def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: return self.scheduler.state_dict() def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : Optional[Any] ) -> str: self.scheduler.load_state_dict(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Any: return self.scheduler.get_lr() def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , *__lowerCamelCase : str , **__lowerCamelCase : str ) -> Tuple: return self.scheduler.print_lr(*__lowerCamelCase , **__lowerCamelCase )
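# Hedged usage sketch (kept as a comment, since this module uses relative
# imports): in practice this wrapper is created for you by
# `Accelerator.prepare`. The model, optimizer, and scheduler below are
# illustrative.
#
#   import torch
#   from accelerate import Accelerator
#
#   accelerator = Accelerator()
#   model = torch.nn.Linear(4, 4)
#   optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
#   lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
#   model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
#
#   optimizer.zero_grad()
#   loss = model(torch.randn(2, 4)).sum()
#   accelerator.backward(loss)
#   optimizer.step()
#   lr_scheduler.step()  # dispatched through the wrapper above; skipped if the
#                        # optimizer step was skipped (gradient accumulation)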
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(function, starting_point, variable="x", precision=10**-10, multiplicity=1):
    """Find a root of `function` (an expression in `variable`) with the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            # Newton step, scaled by the root's multiplicity for faster convergence
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    # Find fourth Root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 + 5j)}")
    # Find value of e
    print("The root of log(y) - 1 = 0 is ", f"{newton_raphson('log(y) - 1', 2, variable='y')}")
    # Exponential Roots
    print("The root of exp(x) - 1 = 0 is", f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}")
    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = { """salesforce/blip2-opt-2.7b""": """https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json""", } class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = "blip_2_vision_model" def __init__( self : Tuple , __lowerCamelCase : Any=14_08 , __lowerCamelCase : Union[str, Any]=61_44 , __lowerCamelCase : Any=39 , __lowerCamelCase : Union[str, Any]=16 , __lowerCamelCase : Any=2_24 , __lowerCamelCase : Tuple=14 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : str=0.00001 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : Any=1e-10 , __lowerCamelCase : List[Any]=True , **__lowerCamelCase : Union[str, Any] , ) -> List[Any]: super().__init__(**__lowerCamelCase ) A : Dict = hidden_size A : Union[str, Any] = intermediate_size A : Optional[int] = num_hidden_layers A : Tuple = num_attention_heads A : List[str] = patch_size A : Any = image_size A : List[Any] = initializer_range A : Union[str, Any] = attention_dropout A : int = layer_norm_eps A : Dict = hidden_act A : Dict = qkv_bias @classmethod def SCREAMING_SNAKE_CASE__ ( cls : str , __lowerCamelCase : Union[str, os.PathLike] , **__lowerCamelCase : Optional[Any] ) -> "PretrainedConfig": cls._set_token_in_kwargs(__lowerCamelCase ) A , A : int = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase ) # get the vision config dict if we are loading from Blip2Config if config_dict.get("model_type" ) == "blip-2": A : Tuple = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__lowerCamelCase , **__lowerCamelCase ) class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = "blip_2_qformer" def __init__( self : Tuple , __lowerCamelCase : Union[str, Any]=3_05_22 , __lowerCamelCase : Union[str, Any]=7_68 , __lowerCamelCase : Tuple=12 , __lowerCamelCase : Dict=12 , __lowerCamelCase : Optional[int]=30_72 , __lowerCamelCase : int="gelu" , __lowerCamelCase : Any=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : str=5_12 , __lowerCamelCase : Any=0.02 , __lowerCamelCase : Any=1e-12 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : List[Any]="absolute" , __lowerCamelCase : Any=2 , __lowerCamelCase : str=14_08 , **__lowerCamelCase : int , ) -> Optional[Any]: super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase ) A : List[Any] = vocab_size A : Optional[int] = hidden_size A : Dict = num_hidden_layers A : int = num_attention_heads A : Optional[Any] = hidden_act A : List[str] = intermediate_size A : Optional[Any] = hidden_dropout_prob A : Optional[Any] = attention_probs_dropout_prob A : Tuple = max_position_embeddings A : Optional[int] = initializer_range A : Optional[Any] = layer_norm_eps A : Optional[int] = position_embedding_type A : List[str] = cross_attention_frequency A : Union[str, Any] = encoder_hidden_size @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , __lowerCamelCase : Union[str, os.PathLike] , **__lowerCamelCase : Optional[int] ) -> "PretrainedConfig": cls._set_token_in_kwargs(__lowerCamelCase ) A , A : Union[str, Any] = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase ) # get the qformer config dict if we are loading from Blip2Config if config_dict.get("model_type" ) == "blip-2": A : Optional[int] = config_dict["qformer_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__lowerCamelCase , **__lowerCamelCase ) class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = "blip-2" a__ = True def __init__( self : Union[str, Any] , __lowerCamelCase : Any=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Tuple=32 , **__lowerCamelCase : Any ) -> Optional[int]: super().__init__(**__lowerCamelCase ) if vision_config is None: A : List[str] = {} logger.info("vision_config is None. initializing the Blip2VisionConfig with default values." ) if qformer_config is None: A : Dict = {} logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values." ) if text_config is None: A : Any = {} logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." 
) A : List[Any] = BlipaVisionConfig(**__lowerCamelCase ) A : List[Any] = BlipaQFormerConfig(**__lowerCamelCase ) A : Union[str, Any] = text_config["model_type"] if "model_type" in text_config else "opt" A : Tuple = CONFIG_MAPPING[text_model_type](**__lowerCamelCase ) A : Tuple = self.text_config.tie_word_embeddings A : Union[str, Any] = self.text_config.is_encoder_decoder A : Union[str, Any] = num_query_tokens A : str = self.vision_config.hidden_size A : int = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES A : Any = 1.0 A : Optional[int] = 0.02 @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , __lowerCamelCase : BlipaVisionConfig , __lowerCamelCase : BlipaQFormerConfig , __lowerCamelCase : PretrainedConfig , **__lowerCamelCase : Tuple , ) -> Tuple: return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__lowerCamelCase , ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int: A : str = copy.deepcopy(self.__dict__ ) A : List[str] = self.vision_config.to_dict() A : int = self.qformer_config.to_dict() A : Dict = self.text_config.to_dict() A : Optional[int] = self.__class__.model_type return output
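# Hedged construction sketch (kept as a comment; the class names in this
# module are mangled — "Blipa..." — so the upstream `transformers` names are
# assumed below):
#
#   from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig
#
#   config = Blip2Config.from_vision_qformer_text_configs(
#       Blip2VisionConfig(), Blip2QFormerConfig(), OPTConfig()
#   )
#   config.num_query_tokens  # 32 by default
#   config.to_dict()         # serializes the three sub-configs back to plain dicts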
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} __SCREAMING_SNAKE_CASE = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } __SCREAMING_SNAKE_CASE = { """allenai/led-base-16384""": 16384, } class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = LEDTokenizer a__ = ["input_ids", "attention_mask"] def __init__( self : int , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : str="replace" , __lowerCamelCase : Optional[int]="<s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Dict="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : Union[str, Any]="<pad>" , __lowerCamelCase : Dict="<mask>" , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : str=True , **__lowerCamelCase : Union[str, Any] , ) -> Optional[int]: super().__init__( __lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase , **__lowerCamelCase , ) A : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space: A : Any = getattr(__lowerCamelCase , pre_tok_state.pop("type" ) ) A : Any = add_prefix_space A : Tuple = pre_tok_class(**__lowerCamelCase ) A : str = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` A : List[str] = "post_processor" A : Union[str, Any] = getattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) if tokenizer_component_instance: A : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A : Union[str, Any] = tuple(state["sep"] ) if "cls" in state: A : str = tuple(state["cls"] ) A : int = False if state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space: A : List[Any] = add_prefix_space A : Dict = True if state.get("trim_offsets" , __lowerCamelCase ) != trim_offsets: A : Dict = trim_offsets A : str = True if changes_to_apply: A : int = getattr(__lowerCamelCase , state.pop("type" ) ) A : Dict = component_class(**__lowerCamelCase ) setattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) @property # Copied from 
transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Any ) -> Dict: A : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else value A : Tuple = value def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[str] ) -> BatchEncoding: A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , *__lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ) -> BatchEncoding: A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]: A : Optional[Any] = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase ) return tuple(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any]=None ) -> List[str]: A : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: A : str = [self.sep_token_id] A : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ) -> dict: A : Dict = super()._pad( encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: A : List[Any] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: A : Optional[int] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
A : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase ) if needs_to_be_padded: A : Any = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` A : Tuple = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": A : Tuple = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
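# Hedged usage sketch of the `_pad` override above (kept as a comment; the
# fast tokenizer class in this module is mangled, so the upstream name
# `LEDTokenizerFast` is assumed):
#
#   from transformers import LEDTokenizerFast
#
#   tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#   encoded = tokenizer("Summarize this document.")
#   # Give the first token (<s>) global attention, as LED summarization usually does.
#   encoded["global_attention_mask"] = [1] + [0] * (len(encoded["input_ids"]) - 1)
#   batch = tokenizer.pad([encoded], padding="max_length", max_length=16)
#   batch["global_attention_mask"]  # padded with -1: 0 already means "local attention"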
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
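# Hedged usage sketch (kept as a comment because this module uses relative
# imports; the column names are illustrative, and task templates are
# deprecated in recent `datasets` releases):
#
#   from datasets.tasks import QuestionAnsweringExtractive
#
#   template = QuestionAnsweringExtractive(question_column="query", context_column="passage")
#   template.column_mapping
#   # -> {'query': 'question', 'passage': 'context', 'answers': 'answers'}
#
# `Dataset.prepare_for_task` consumes this mapping to rename columns to the
# canonical question/context/answers schema.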
def solution(n: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum and the
    sum of the squares of the first `n` natural numbers."""
    # Closed forms: sum of squares = n(n+1)(2n+1)/6, square of sum = (n(n+1)/2)^2
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
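# Sanity check (quick to verify by hand): for n = 10 the brute force
#   sum(range(1, 11)) ** 2 - sum(i * i for i in range(1, 11))
# gives 3025 - 385 = 2640, matching solution(10) and the example value from
# the Project Euler problem statement.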
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class lowerCamelCase_ : '''simple docstring''' def __init__( self : int , __lowerCamelCase : Any , __lowerCamelCase : Dict=3 , __lowerCamelCase : Dict=32 , __lowerCamelCase : Any=3 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : str=[8, 16, 32, 64] , __lowerCamelCase : Dict=[1, 1, 2, 1] , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : str=3 , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase : Tuple=[2, 3, 4] , __lowerCamelCase : Any=1 , ) -> int: A : Optional[int] = parent A : List[str] = batch_size A : Tuple = image_size A : List[str] = num_channels A : List[str] = embeddings_size A : List[str] = hidden_sizes A : str = depths A : Optional[Any] = is_training A : int = use_labels A : Optional[int] = hidden_act A : List[Any] = num_labels A : List[str] = scope A : str = len(__lowerCamelCase ) A : Optional[int] = out_features A : str = out_indices A : Optional[int] = num_groups def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: A : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Optional[int] = None if self.use_labels: A : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels ) A : Tuple = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ) -> Optional[int]: A : Any = BitModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : List[Any] = model(__lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : Dict ) -> Tuple: A : Union[str, Any] = self.num_labels A : List[str] = BitForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : str = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ) -> 
List[Any]: A : Dict = BitBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Optional[Any] = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None A : Optional[Any] = None A : Optional[int] = BitBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Any = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Dict: A : List[str] = self.prepare_config_and_inputs() A , A , A : Tuple = config_and_inputs A : Tuple = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () a__ = ( {"feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: A : Any = BitModelTester(self ) A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: return @unittest.skip(reason="Bit does not output attentions" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: pass @unittest.skip(reason="Bit does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]: pass @unittest.skip(reason="Bit does not support input and output embeddings" ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]: pass def SCREAMING_SNAKE_CASE__ ( self : int ) -> str: A , A : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Dict = model_class(__lowerCamelCase ) A : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Optional[Any] = [*signature.parameters.keys()] A : List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]: A : int = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]: A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Optional[int] = model_class(config=__lowerCamelCase ) for name, module in model.named_modules(): if isinstance(__lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: def check_hidden_states_output(__lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] ): A : Dict = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): A : List[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) A : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A : List[Any] = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() A : Dict = ["preactivation", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: A : Dict = layer_type A : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) @unittest.skip(reason="Bit does not use feedforward chunking" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: pass def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]: A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]: for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Optional[Any] = BitModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]: return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: A : Union[str, Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCamelCase ) A : List[Any] = self.default_image_processor A : List[Any] = prepare_img() A : Tuple = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): A : Union[str, Any] = model(**__lowerCamelCase ) # verify the logits A : str = torch.Size((1, 
10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : Optional[Any] = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) @require_torch class lowerCamelCase_ ( _A ,unittest.TestCase ): '''simple docstring''' a__ = (BitBackbone,) if is_torch_available() else () a__ = BitConfig a__ = False def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: A : Union[str, Any] = BitModelTester(self )
from manim import * class lowerCamelCase_ ( _A ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]: A : Dict = Rectangle(height=0.5 , width=0.5 ) A : Any = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) A : Tuple = [mem.copy() for i in range(6 )] A : List[Any] = [mem.copy() for i in range(6 )] A : Optional[int] = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) A : Optional[int] = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) A : int = VGroup(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) A : Tuple = Text("CPU" , font_size=24 ) A : Tuple = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__lowerCamelCase ) A : Any = [mem.copy() for i in range(4 )] A : List[Any] = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) A : List[str] = Text("GPU" , font_size=24 ) A : List[Any] = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase ) gpu.move_to([-1, -1, 0] ) self.add(__lowerCamelCase ) A : int = [mem.copy() for i in range(6 )] A : Optional[Any] = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) A : Optional[Any] = Text("Model" , font_size=24 ) A : int = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase ) model.move_to([3, -1.0, 0] ) self.add(__lowerCamelCase ) A : List[str] = [] for i, rect in enumerate(__lowerCamelCase ): rect.set_stroke(__lowerCamelCase ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) A : Optional[Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowerCamelCase , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowerCamelCase ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=__lowerCamelCase , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=__lowerCamelCase , buff=0.0 ) self.add(__lowerCamelCase ) cpu_targs.append(__lowerCamelCase ) A : Any = [mem.copy() for i in range(6 )] A : str = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) A : List[Any] = Text("Loaded Checkpoint" , font_size=24 ) A : List[str] = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , aligned_edge=__lowerCamelCase , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) A : int = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) A : Union[str, Any] = MarkupText( F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(__lowerCamelCase , __lowerCamelCase ) A : List[Any] = MarkupText( F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , ) blue_text.next_to(__lowerCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) A : Dict = MarkupText( F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(__lowerCamelCase ) , Write(__lowerCamelCase ) ) self.play(Write(__lowerCamelCase , run_time=1 ) , Create(__lowerCamelCase , run_time=1 ) ) A : int = [] A : str = [] for i, rect in enumerate(__lowerCamelCase ): A : List[Any] = fill.copy().set_fill(__lowerCamelCase , opacity=0.7 ) 
target.move_to(__lowerCamelCase ) first_animations.append(GrowFromCenter(__lowerCamelCase , run_time=1 ) ) A : Optional[int] = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(__lowerCamelCase , run_time=1.5 ) ) self.play(*__lowerCamelCase ) self.play(*__lowerCamelCase ) self.wait()
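# Hedged usage note: scenes like the one above are rendered from the command
# line with the manim CLI, e.g. `manim -pql this_file.py <SceneClassName>`
# (-p previews the result, -ql renders at low quality). The scene class name
# in this file is mangled, so substitute the real one.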
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: A : Union[str, Any] = XLMRobertaModel.from_pretrained("xlm-roberta-base" ) A : Tuple = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] ) # The dog is cute and lives in the garden house A : Tuple = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim A : List[str] = torch.tensor( [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): A : Tuple = model(__lowerCamelCase )["last_hidden_state"].detach() self.assertEqual(output.shape , __lowerCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCamelCase , atol=1e-3 ) ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int: A : str = XLMRobertaModel.from_pretrained("xlm-roberta-large" ) A : List[Any] = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] ) # The dog is cute and lives in the garden house A : Optional[Any] = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim A : List[Any] = torch.tensor( [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): A : Optional[int] = model(__lowerCamelCase )["last_hidden_state"].detach() self.assertEqual(output.shape , __lowerCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCamelCase , atol=1e-3 ) )
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    # Decorator: run `func` once and return how long it took, in seconds.
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
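# A hedged smoke test for the helpers above (paths, features, and sizes are
# arbitrary illustrative choices).
if __name__ == "__main__":
    import os
    import tempfile

    feats = datasets.Features({"text": datasets.Value("string"), "score": datasets.Value("float32")})
    with tempfile.TemporaryDirectory() as tmp_dir:
        dummy = generate_example_dataset(os.path.join(tmp_dir, "dummy.arrow"), feats, num_examples=10)
        print(dummy)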
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase_ : '''simple docstring''' def __init__( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Optional[int]=10 , __lowerCamelCase : Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase : Tuple=[1, 1, 2, 1] , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , ) -> str: A : Optional[Any] = parent A : Optional[int] = batch_size A : List[str] = image_size A : List[str] = num_channels A : Tuple = embeddings_size A : Optional[int] = hidden_sizes A : Dict = depths A : Optional[int] = is_training A : List[str] = use_labels A : List[Any] = hidden_act A : Optional[int] = num_labels A : int = scope A : List[Any] = len(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[Any]: A : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Optional[Any] = None if self.use_labels: A : Any = ids_tensor([self.batch_size] , self.num_labels ) A : List[Any] = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Tuple: A : List[str] = TFRegNetModel(config=__lowerCamelCase ) A : str = model(__lowerCamelCase , training=__lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Tuple ) -> List[str]: A : List[Any] = self.num_labels A : int = TFRegNetForImageClassification(__lowerCamelCase ) A : str = model(__lowerCamelCase , labels=__lowerCamelCase , training=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: A : Any = self.prepare_config_and_inputs() A , A , A : str = config_and_inputs A : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () a__ = ( {"feature-extraction": TFRegNetModel, 
"image-classification": TFRegNetForImageClassification} if is_tf_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: A : Optional[Any] = TFRegNetModelTester(self ) A : int = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Tuple: return @unittest.skip(reason="RegNet does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) @slow def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: super().test_keras_fit() @unittest.skip(reason="RegNet does not support input and output embeddings" ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Union[str, Any]: pass def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple: A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Union[str, Any] = model_class(__lowerCamelCase ) A : int = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Union[str, Any] = [*signature.parameters.keys()] A : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple: A : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: def check_hidden_states_output(__lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ): A : int = model_class(__lowerCamelCase ) A : int = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) , training=__lowerCamelCase ) A : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A : Dict = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) A , A : int = self.model_tester.prepare_config_and_inputs_for_common() A : str = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: A : List[str] = layer_type A : List[Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: A , A : Tuple = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(__lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]={} ): A : Optional[int] = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ) A : int = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ).to_tuple() def recursive_check(__lowerCamelCase : List[str] , __lowerCamelCase : Any ): if isinstance(__lowerCamelCase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in 
zip(__lowerCamelCase , __lowerCamelCase ): recursive_check(__lowerCamelCase , __lowerCamelCase ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(__lowerCamelCase , __lowerCamelCase ) ) , msg=( "Tuple and dict output are not equal. Difference:" F""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ) , ) recursive_check(__lowerCamelCase , __lowerCamelCase ) for model_class in self.all_model_classes: A : Tuple = model_class(__lowerCamelCase ) A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} ) A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: A : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Union[str, Any] = TFRegNetModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]: A : List[Any] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) A : Optional[int] = self.default_image_processor A : List[Any] = prepare_img() A : str = image_processor(images=__lowerCamelCase , return_tensors="tf" ) # forward pass A : List[Any] = model(**__lowerCamelCase , training=__lowerCamelCase ) # verify the logits A : Dict = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : Optional[Any] = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 )
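# A minimal inference sketch for the classes exercised above. The checkpoint id is
# assumed to be the first entry of TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
# ("facebook/regnet-y-040"); the image path is a placeholder.
def _regnet_inference_sketch(image_path: str = "cat.png") -> int:
    from PIL import Image

    from transformers import AutoImageProcessor, TFRegNetForImageClassification

    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    inputs = processor(images=Image.open(image_path), return_tensors="tf")
    logits = model(**inputs, training=False).logits  # shape (1, 1000)
    return int(tf.math.argmax(logits, axis=-1)[0])  # predicted ImageNet class id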
import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = """<""" if sys.byteorder == """little""" else """>""" # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image __SCREAMING_SNAKE_CASE = [ np.dtype("""|b1"""), np.dtype("""|u1"""), np.dtype("""<u2"""), np.dtype(""">u2"""), np.dtype("""<i2"""), np.dtype(""">i2"""), np.dtype("""<u4"""), np.dtype(""">u4"""), np.dtype("""<i4"""), np.dtype(""">i4"""), np.dtype("""<f4"""), np.dtype(""">f4"""), np.dtype("""<f8"""), np.dtype(""">f8"""), ] @dataclass class lowerCamelCase_ : '''simple docstring''' a__ = True a__ = None # Automatically constructed a__ = "PIL.Image.Image" a__ = pa.struct({"bytes": pa.binary(), "path": pa.string()} ) a__ = field(default="Image" ,init=_A ,repr=_A ) def __call__( self : str ) -> Union[str, Any]: return self.pa_type def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ) -> dict: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'." ) if isinstance(__lowerCamelCase , __lowerCamelCase ): A : Tuple = np.array(__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): return {"path": value, "bytes": None} elif isinstance(__lowerCamelCase , __lowerCamelCase ): return {"path": None, "bytes": value} elif isinstance(__lowerCamelCase , np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(__lowerCamelCase ) elif isinstance(__lowerCamelCase , PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(__lowerCamelCase ) elif value.get("path" ) is not None and os.path.isfile(value["path"] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get("path" )} elif value.get("bytes" ) is not None or value.get("path" ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get("bytes" ), "path": value.get("path" )} else: raise ValueError( F"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" ) def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : dict , __lowerCamelCase : int=None ) -> "PIL.Image.Image": if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support decoding images, please install 'Pillow'." 
) if token_per_repo_id is None: A : List[Any] = {} A , A : Any = value["path"], value["bytes"] if bytes_ is None: if path is None: raise ValueError(F"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" ) else: if is_local_path(__lowerCamelCase ): A : List[Any] = PIL.Image.open(__lowerCamelCase ) else: A : Any = path.split("::" )[-1] try: A : int = string_to_dict(__lowerCamelCase , config.HUB_DATASETS_URL )["repo_id"] A : Tuple = token_per_repo_id.get(__lowerCamelCase ) except ValueError: A : Any = None with xopen(__lowerCamelCase , "rb" , use_auth_token=__lowerCamelCase ) as f: A : Optional[Any] = BytesIO(f.read() ) A : List[Any] = PIL.Image.open(bytes_ ) else: A : Union[str, Any] = PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def SCREAMING_SNAKE_CASE__ ( self : str ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value return ( self if self.decode else { "bytes": Value("binary" ), "path": Value("string" ), } ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : Union[pa.StringArray, pa.StructArray, pa.ListArray] ) -> pa.StructArray: if pa.types.is_string(storage.type ): A : Dict = pa.array([None] * len(__lowerCamelCase ) , type=pa.binary() ) A : List[Any] = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): A : Optional[int] = pa.array([None] * len(__lowerCamelCase ) , type=pa.string() ) A : int = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("bytes" ) >= 0: A : Optional[int] = storage.field("bytes" ) else: A : Optional[Any] = pa.array([None] * len(__lowerCamelCase ) , type=pa.binary() ) if storage.type.get_field_index("path" ) >= 0: A : Optional[Any] = storage.field("path" ) else: A : Dict = pa.array([None] * len(__lowerCamelCase ) , type=pa.string() ) A : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_list(storage.type ): A : int = pa.array( [encode_np_array(np.array(__lowerCamelCase ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , ) A : Dict = pa.array([None] * len(__lowerCamelCase ) , type=pa.string() ) A : List[str] = pa.StructArray.from_arrays( [bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() ) return array_cast(__lowerCamelCase , self.pa_type ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : pa.StructArray ) -> pa.StructArray: @no_op_if_value_is_null def path_to_bytes(__lowerCamelCase : Optional[int] ): with xopen(__lowerCamelCase , "rb" ) as f: A : Union[str, Any] = f.read() return bytes_ A : int = pa.array( [ (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) A : List[str] = pa.array( [os.path.basename(__lowerCamelCase ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , ) A : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() ) return array_cast(__lowerCamelCase , self.pa_type ) def UpperCAmelCase ( ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'." 
) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() A : Any = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def UpperCAmelCase ( _lowerCamelCase ): A : int = BytesIO() if image.format in list_image_compression_formats(): A : str = image.format else: A : List[Any] = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF" image.save(_lowerCamelCase , format=_lowerCamelCase ) return buffer.getvalue() def UpperCAmelCase ( _lowerCamelCase ): if hasattr(_lowerCamelCase , "filename" ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(_lowerCamelCase )} def UpperCAmelCase ( _lowerCamelCase ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'." ) A : Tuple = array.dtype A : Union[str, Any] = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER A : Union[str, Any] = dtype.kind A : List[Any] = dtype.itemsize A : int = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: A : Dict = np.dtype("|u1" ) if dtype_kind not in ["u", "i"]: raise TypeError( f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" ) if dtype is not dest_dtype: warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: A : Union[str, Any] = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: A : int = dtype_byteorder + dtype_kind + str(_lowerCamelCase ) A : int = np.dtype(_lowerCamelCase ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" ) A : str = PIL.Image.fromarray(array.astype(_lowerCamelCase ) ) return {"path": None, "bytes": image_to_bytes(_lowerCamelCase )} def UpperCAmelCase ( _lowerCamelCase ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'." ) if objs: A , A : Dict = first_non_null_value(_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(_lowerCamelCase , np.ndarray ): A : int = no_op_if_value_is_null(_lowerCamelCase ) return [obj_to_image_dict_func(_lowerCamelCase ) for obj in objs] elif isinstance(_lowerCamelCase , PIL.Image.Image ): A : Optional[Any] = no_op_if_value_is_null(_lowerCamelCase ) return [obj_to_image_dict_func(_lowerCamelCase ) for obj in objs] else: return objs else: return objs
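# Usage sketch for the Image feature defined above: building a dataset from file paths
# and reading it back decoded vs. undecoded. The path is a placeholder.
def _image_feature_usage_sketch():
    from datasets import Dataset, Features
    from datasets.features import Image

    ds = Dataset.from_dict({"image": ["path/to/cat.png"]}, features=Features({"image": Image()}))
    pil_image = ds[0]["image"]  # lazily decoded to a PIL.Image.Image
    raw = ds.cast_column("image", Image(decode=False))[0]["image"]  # {"bytes": ..., "path": ...}
    return pil_image, raw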
import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = (PNDMScheduler,) a__ = (("num_inference_steps", 50),) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , **__lowerCamelCase : str ) -> Optional[Any]: A : Union[str, Any] = { "num_train_timesteps": 10_00, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**__lowerCamelCase ) return config def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : List[str]=0 , **__lowerCamelCase : Any ) -> Tuple: A : Dict = dict(self.forward_default_kwargs ) A : Dict = kwargs.pop("num_inference_steps" , __lowerCamelCase ) A : Union[str, Any] = self.dummy_sample A : List[Any] = 0.1 * sample A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A : Any = self.get_scheduler_config(**__lowerCamelCase ) A : int = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals A : Tuple = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCamelCase ) A : Dict = scheduler_class.from_pretrained(__lowerCamelCase ) new_scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals A : Tuple = dummy_past_residuals[:] A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : str = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A : int = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[str] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]: pass def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Optional[Any]=0 , **__lowerCamelCase : Tuple ) -> str: A : List[str] = dict(self.forward_default_kwargs ) A : Optional[int] = kwargs.pop("num_inference_steps" , __lowerCamelCase ) A : List[str] = self.dummy_sample A : Any = 0.1 * sample A : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A : Tuple = self.get_scheduler_config() A : Optional[int] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals (must be after setting timesteps) A : Optional[int] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCamelCase ) A : str = scheduler_class.from_pretrained(__lowerCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residual (must be after setting timesteps) A : Optional[Any] = dummy_past_residuals[:] A : Union[str, Any] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : Dict = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A : 
Union[str, Any] = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[Any] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : Tuple , **__lowerCamelCase : Any ) -> Union[str, Any]: A : Optional[Any] = self.scheduler_classes[0] A : List[Any] = self.get_scheduler_config(**__lowerCamelCase ) A : str = scheduler_class(**__lowerCamelCase ) A : List[str] = 10 A : Union[str, Any] = self.dummy_model() A : int = self.dummy_sample_deter scheduler.set_timesteps(__lowerCamelCase ) for i, t in enumerate(scheduler.prk_timesteps ): A : Optional[int] = model(__lowerCamelCase , __lowerCamelCase ) A : Optional[int] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): A : Tuple = model(__lowerCamelCase , __lowerCamelCase ) A : Tuple = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample return sample def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any: A : Union[str, Any] = dict(self.forward_default_kwargs ) A : Union[str, Any] = kwargs.pop("num_inference_steps" , __lowerCamelCase ) for scheduler_class in self.scheduler_classes: A : Dict = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) A : List[Any] = self.dummy_sample A : List[Any] = 0.1 * sample if num_inference_steps is not None and hasattr(__lowerCamelCase , "set_timesteps" ): scheduler.set_timesteps(__lowerCamelCase ) elif num_inference_steps is not None and not hasattr(__lowerCamelCase , "set_timesteps" ): A : List[str] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] A : Tuple = dummy_past_residuals[:] A : Dict = scheduler.step_prk(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[Any] = scheduler.step_prk(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) A : Any = scheduler.step_plms(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : str = scheduler.step_plms(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple: for timesteps in [1_00, 10_00]: self.check_over_configs(num_train_timesteps=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]: for steps_offset in [0, 1]: self.check_over_configs(steps_offset=__lowerCamelCase ) A : Dict = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config(steps_offset=1 ) A : Optional[int] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=__lowerCamelCase , beta_end=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : 
Optional[int] ) -> Union[str, Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]: for t in [1, 5, 10]: self.check_over_forward(time_step=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int: for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ): self.check_over_forward(num_inference_steps=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any: # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 A : str = 27 for scheduler_class in self.scheduler_classes: A : Tuple = self.dummy_sample A : List[Any] = 0.1 * sample A : List[Any] = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : str ) -> int: with self.assertRaises(__lowerCamelCase ): A : Union[str, Any] = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict: A : Optional[Any] = self.full_loop() A : Tuple = torch.sum(torch.abs(__lowerCamelCase ) ) A : Optional[int] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 198.1318 ) < 1e-2 assert abs(result_mean.item() - 0.2580 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any: A : Any = self.full_loop(prediction_type="v_prediction" ) A : Union[str, Any] = torch.sum(torch.abs(__lowerCamelCase ) ) A : List[str] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 67.3986 ) < 1e-2 assert abs(result_mean.item() - 0.0878 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]: # We specify different beta, so that the first alpha is 0.99 A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 ) A : Dict = torch.sum(torch.abs(__lowerCamelCase ) ) A : Any = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 230.0399 ) < 1e-2 assert abs(result_mean.item() - 0.2995 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any: # We specify different beta, so that the first alpha is 0.99 A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 ) A : List[Any] = torch.sum(torch.abs(__lowerCamelCase ) ) A : Optional[Any] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 186.9482 ) < 1e-2 assert abs(result_mean.item() - 0.2434 ) < 1e-3
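# Shape of the PRK -> PLMS denoising loop that the tests above step through. The
# `model` argument stands in for any epsilon-predicting UNet-style callable; this is a
# sketch under that assumption, not a reference sampler.
def _pndm_sampling_sketch(model, sample: torch.Tensor, num_inference_steps: int = 50) -> torch.Tensor:
    scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
    scheduler.set_timesteps(num_inference_steps)
    for t in scheduler.prk_timesteps:  # Runge-Kutta warm-up steps
        sample = scheduler.step_prk(model(sample, t), t, sample).prev_sample
    for t in scheduler.plms_timesteps:  # linear multistep steps
        sample = scheduler.step_plms(model(sample, t), t, sample).prev_sample
    return sample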
from __future__ import annotations

from math import pi

# Define the reduced Planck constant ℏ (h-bar) and the speed of light c
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ: J * s
SPEED_OF_LIGHT = 3e8  # unit of c: m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """
    Solve the Casimir-force relation F = (ℏ c π² A) / (240 d⁴) for whichever of
    force, area, or distance is passed as 0.
    """
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force cannot be negative")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if area < 0:
        raise ValueError("Area cannot be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
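# Usage sketch: exactly one of the three arguments is passed as 0 and the function
# solves for it. The numeric values below are illustrative only.
def _casimir_force_sketch() -> None:
    print(casimir_force(force=0, area=4.0, distance=0.05))      # solve for the force
    print(casimir_force(force=8.3e-22, area=0, distance=0.05))  # solve for the plate area
    print(casimir_force(force=8.3e-22, area=4.0, distance=0))   # solve for the separation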
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCamelCase_ ( _A ,_A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = StableDiffusionInstructPixaPixPipeline a__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"} a__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS a__ = IMAGE_TO_IMAGE_IMAGE_PARAMS a__ = IMAGE_TO_IMAGE_IMAGE_PARAMS def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[Any]: torch.manual_seed(0 ) A : Union[str, Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) A : str = PNDMScheduler(skip_prk_steps=__lowerCamelCase ) torch.manual_seed(0 ) A : List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) A : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) A : Union[str, Any] = CLIPTextModel(__lowerCamelCase ) A : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) A : List[str] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]=0 ) -> Optional[Any]: A : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] A : Any = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ) if str(__lowerCamelCase ).startswith("mps" ): A : str = torch.manual_seed(__lowerCamelCase ) else: A : Optional[int] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) A : int = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "image_guidance_scale": 1, "output_type": "numpy", } return inputs def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[str]: A : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator A : Any = self.get_dummy_components() A : Tuple = StableDiffusionInstructPixaPixPipeline(**__lowerCamelCase ) A : Union[str, Any] = sd_pipe.to(__lowerCamelCase ) 
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase ) A : int = self.get_dummy_inputs(__lowerCamelCase ) A : List[str] = sd_pipe(**__lowerCamelCase ).images A : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) A : Optional[int] = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[Any]: A : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator A : Tuple = self.get_dummy_components() A : int = StableDiffusionInstructPixaPixPipeline(**__lowerCamelCase ) A : Union[str, Any] = sd_pipe.to(__lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCamelCase ) A : Union[str, Any] = self.get_dummy_inputs(__lowerCamelCase ) A : Tuple = "french fries" A : List[str] = sd_pipe(**__lowerCamelCase , negative_prompt=__lowerCamelCase ) A : Dict = output.images A : str = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) A : Optional[Any] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> int: A : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator A : int = self.get_dummy_components() A : str = StableDiffusionInstructPixaPixPipeline(**__lowerCamelCase ) A : List[Any] = sd_pipe.to(__lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCamelCase ) A : Optional[Any] = self.get_dummy_inputs(__lowerCamelCase ) A : List[Any] = [inputs["prompt"]] * 2 A : Any = np.array(inputs["image"] ).astype(np.floataa ) / 255.0 A : List[str] = torch.from_numpy(__lowerCamelCase ).unsqueeze(0 ).to(__lowerCamelCase ) A : Union[str, Any] = image / 2 + 0.5 A : Tuple = image.permute(0 , 3 , 1 , 2 ) A : int = image.repeat(2 , 1 , 1 , 1 ) A : Union[str, Any] = sd_pipe(**__lowerCamelCase ).images A : Optional[int] = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) A : List[Any] = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]: A : int = "cpu" # ensure determinism for the device-dependent torch.Generator A : Dict = self.get_dummy_components() A : Dict = EulerAncestralDiscreteScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" ) A : str = StableDiffusionInstructPixaPixPipeline(**__lowerCamelCase ) A : str = sd_pipe.to(__lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCamelCase ) A : Optional[int] = self.get_dummy_inputs(__lowerCamelCase ) A : Dict = sd_pipe(**__lowerCamelCase ).images A : List[str] = image[0, -3:, -3:, -1] A : List[Any] = [round(__lowerCamelCase , 4 ) for x in image_slice.flatten().tolist()] print(",".join([str(__lowerCamelCase ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) A : Tuple = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Tuple: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> int: A : Optional[int] = self.get_dummy_components() A : List[Any] = StableDiffusionInstructPixaPixPipeline(**__lowerCamelCase ) A : Union[str, Any] = 
VaeImageProcessor(do_resize=__lowerCamelCase , do_normalize=__lowerCamelCase ) A : Union[str, Any] = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) A : Dict = pipe(**self.get_dummy_inputs_by_type(__lowerCamelCase , input_image_type="pt" ) )[0] A : Any = components["vae"] A : List[Any] = self.get_dummy_inputs_by_type(__lowerCamelCase , input_image_type="pt" ) for image_param in self.image_latents_params: if image_param in inputs.keys(): A : Optional[Any] = vae.encode(inputs[image_param] ).latent_dist.mode() A : Optional[int] = pipe(**__lowerCamelCase )[0] A : Dict = np.abs(out - out_latents_inputs ).max() self.assertLess(__lowerCamelCase , 1e-4 , "passing latents as image input generate different result from passing image" ) @slow @require_torch_gpu class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict: super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : str=0 ) -> List[str]: A : str = torch.manual_seed(__lowerCamelCase ) A : Tuple = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" ) A : List[str] = { "prompt": "turn him into a cyborg", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "image_guidance_scale": 1.0, "output_type": "numpy", } return inputs def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: A : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=__lowerCamelCase ) pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) pipe.enable_attention_slicing() A : Union[str, Any] = self.get_inputs() A : Dict = pipe(**__lowerCamelCase ).images A : List[Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_12, 5_12, 3) A : Any = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Tuple: A : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=__lowerCamelCase ) A : Union[str, Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) pipe.enable_attention_slicing() A : Union[str, Any] = self.get_inputs() A : List[Any] = pipe(**__lowerCamelCase ).images A : List[str] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_12, 5_12, 3) A : Any = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]: A : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=__lowerCamelCase ) A : Union[str, Any] = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) pipe.enable_attention_slicing() A : int = self.get_inputs() A : List[str] = pipe(**__lowerCamelCase ).images A : Tuple = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_12, 5_12, 3) A : int = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : 
List[Any] ) -> Optional[int]: A : List[Any] = 0 def callback_fn(__lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : torch.FloatTensor ) -> None: A : str = True nonlocal number_of_steps number_of_steps += 1 if step == 1: A : str = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) A : Optional[Any] = latents[0, -3:, -3:, -1] A : int = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: A : Optional[int] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) A : Optional[int] = latents[0, -3:, -3:, -1] A : Union[str, Any] = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 A : Optional[int] = False A : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=__lowerCamelCase , torch_dtype=torch.floataa ) A : List[str] = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) pipe.enable_attention_slicing() A : Optional[Any] = self.get_inputs() pipe(**__lowerCamelCase , callback=__lowerCamelCase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Tuple: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() A : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=__lowerCamelCase , torch_dtype=torch.floataa ) A : Optional[int] = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() A : str = self.get_inputs() A : int = pipe(**__lowerCamelCase ) A : Any = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: A : Optional[Any] = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 A : int = inputs["image"].resize((5_04, 5_04) ) A : Optional[Any] = "timbrooks/instruct-pix2pix" A : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained( __lowerCamelCase , safety_checker=__lowerCamelCase , ) pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) pipe.enable_attention_slicing() A : int = pipe(**__lowerCamelCase ) A : Optional[Any] = output.images[0] A : Tuple = image[2_55:2_58, 3_83:3_86, -1] assert image.shape == (5_04, 5_04, 3) A : Dict = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
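# End-to-end sketch of the pipeline under test. Note: the class is imported above as
# StableDiffusionInstructPixaPixPipeline; in the public diffusers API the same class is
# spelled StableDiffusionInstructPix2PixPipeline. Prompt and guidance values are
# illustrative, and the image URL is the one used by the slow tests.
def _instruct_pix2pix_sketch():
    pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
        "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
    ).to("cuda")
    image = load_image(
        "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
    )
    return pipe("turn him into a cyborg", image=image, image_guidance_scale=1.0).images[0]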
import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used; remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path to the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
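# Example invocation (the repo id comes from the --checkpoint-repo help text; the
# script filename and output directory are assumptions):
#
#   python convert_roberta_prelayernorm_checkpoint.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm-converted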
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric

from transformers import AutoFeatureExtractor, pipeline


def log_results(result: Dataset, args: Dict[str, str]):
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
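# Example invocation (model and dataset ids are illustrative; the script filename is
# an assumption):
#
#   python eval.py \
#       --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_8_0 \
#       --config en --split test --log_outputs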
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
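# What the lazy-module pattern above buys (sketch): importing the package only
# registers the names listed in _import_structure; torch and modeling_instructblip are
# loaded the first time one of those attributes is actually touched.
#
#   import transformers                                 # cheap, no torch-side work yet
#   transformers.InstructBlipForConditionalGeneration   # attribute access triggers the real import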
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
    )
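# Illustrative call: the 3-qubit QFT of |000> yields a uniform superposition, so the
# 10000 shots spread roughly evenly over the 8 basis states (exact counts are
# stochastic and will differ run to run):
#
#   quantum_fourier_transform(3)
#   # -> {'000': 1271, '001': 1238, '010': 1254, '011': 1263, '100': 1239, ...}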
17
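# Usage sketch for quantum_fourier_transform above (requires qiskit with the Aer provider).
# The QFT of the all-zero state is an equal superposition, so each of the 2**n measured
# basis states should appear in roughly shots / 2**n of the 10000 runs.
counts = quantum_fourier_transform(2)
print(counts)  # e.g. {'00': ..., '01': ..., '10': ..., '11': ...}, each near 2500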
import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Optional[int]: self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) ) for a, b in zip(__lowerCamelCase , __lowerCamelCase ): self.assertAlmostEqual(__lowerCamelCase , __lowerCamelCase , delta=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]: A : List[Any] = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(__lowerCamelCase ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: A : Union[str, Any] = None ops.enable_eager_execution_internal() A : Tuple = tf.config.list_physical_devices("CPU" ) if len(__lowerCamelCase ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) A : Dict = tf.config.list_logical_devices(device_type="CPU" ) A : List[str] = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): A : Optional[int] = GradientAccumulator() A : Tuple = tf.Variable([4.0, 3.0] ) A , A : List[Any] = create_optimizer(5e-5 , 10 , 5 ) A : List[str] = tf.Variable([0.0, 0.0] , trainable=__lowerCamelCase ) def accumulate_on_replica(__lowerCamelCase : Tuple ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(__lowerCamelCase : Any , __lowerCamelCase : Optional[int] ): with strategy.scope(): A : int = strategy.experimental_local_results(__lowerCamelCase ) local_variables[0].assign(__lowerCamelCase ) local_variables[1].assign(__lowerCamelCase ) strategy.run(__lowerCamelCase , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(__lowerCamelCase ) def _check_local_values(__lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ): A : Optional[int] = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , __lowerCamelCase , tol=1e-2 ) self.assertListAlmostEqual(values[1].value() , __lowerCamelCase , tol=1e-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
17
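# A condensed sketch of the accumulate-then-apply cycle the tests above exercise
# (assumes TensorFlow is installed; GradientAccumulator and create_optimizer come from
# transformers, exactly as imported in the test file).
import tensorflow as tf
from transformers import GradientAccumulator, create_optimizer

accumulator = GradientAccumulator()
optimizer, lr_schedule = create_optimizer(5e-5, num_train_steps=10, num_warmup_steps=5)
variable = tf.Variable([4.0, 3.0])

for micro_batch_gradient in ([1.0, 2.0], [3.0, -1.0]):
    accumulator([tf.constant(micro_batch_gradient)])  # one call per micro-batch

optimizer.apply_gradients(zip(accumulator.gradients, [variable]))
accumulator.reset()  # clear the step count and gradient buffers for the next window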
1
import sacrebleu as scb from packaging import version from sacrebleu import CHRF import datasets __SCREAMING_SNAKE_CASE = """\ @inproceedings{popovic-2015-chrf, title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\", author = \"Popovi{\'c}, Maja\", booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\", month = sep, year = \"2015\", address = \"Lisbon, Portugal\", publisher = \"Association for Computational Linguistics\", url = \"https://aclanthology.org/W15-3049\", doi = \"10.18653/v1/W15-3049\", pages = \"392--395\", } @inproceedings{popovic-2017-chrf, title = \"chr{F}++: words helping character n-grams\", author = \"Popovi{\'c}, Maja\", booktitle = \"Proceedings of the Second Conference on Machine Translation\", month = sep, year = \"2017\", address = \"Copenhagen, Denmark\", publisher = \"Association for Computational Linguistics\", url = \"https://aclanthology.org/W17-4770\", doi = \"10.18653/v1/W17-4770\", pages = \"612--618\", } @inproceedings{post-2018-call, title = \"A Call for Clarity in Reporting {BLEU} Scores\", author = \"Post, Matt\", booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\", month = oct, year = \"2018\", address = \"Belgium, Brussels\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W18-6319\", pages = \"186--191\", } """ __SCREAMING_SNAKE_CASE = """\ ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches, and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation that is already present in sacrebleu. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information. """ __SCREAMING_SNAKE_CASE = """ Produces ChrF(++) scores for hypotheses given reference translations. Args: predictions (list of str): The predicted sentences. references (list of list of str): The references. There should be one reference sub-list for each prediction sentence. char_order (int): Character n-gram order. Defaults to `6`. word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`. beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`. lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`. whitespace (bool): If `True`, include whitespaces when extracting character n-grams. eps_smoothing (bool): If `True`, applies epsilon smoothing similar to reference chrF++.py, NLTK and Moses implementations. If `False`, it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`. Returns: 'score' (float): The chrF (chrF++) score, 'char_order' (int): The character n-gram order, 'word_order' (int): The word n-gram order. 
If equals to 2, the metric is referred to as chrF++, 'beta' (int): Determine the importance of recall w.r.t precision Examples: Example 1--a simple example of calculating chrF: >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"] >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]] >>> chrf = datasets.load_metric(\"chrf\") >>> results = chrf.compute(predictions=prediction, references=reference) >>> print(results) {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2} Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF: >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"] >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]] >>> chrf = datasets.load_metric(\"chrf\") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2) >>> print(results) {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2} Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case: >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"] >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]] >>> chrf = datasets.load_metric(\"chrf\") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2, ... lowercase=True) >>> print(results) {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]: if version.parse(scb.__version__ ) < version.parse("1.4.12" ): raise ImportWarning( "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n" "You can install it with `pip install \"sacrebleu>=1.4.12\"`." 
) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[ "https://github.com/m-popovic/chrF", ] , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int = CHRF.CHAR_ORDER , __lowerCamelCase : int = CHRF.WORD_ORDER , __lowerCamelCase : int = CHRF.BETA , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , ) -> Dict: A : Any = len(references[0] ) if any(len(__lowerCamelCase ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) A : str = [[refs[i] for refs in references] for i in range(__lowerCamelCase )] A : List[Any] = CHRF(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) A : List[str] = sb_chrf.corpus_score(__lowerCamelCase , __lowerCamelCase ) return { "score": output.score, "char_order": output.char_order, "word_order": output.word_order, "beta": output.beta, }
17
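# Sketch of the reference "transposition" the description above warns about: sacrebleu
# groups references by reference set, while this metric wants one sub-list per prediction.
sacrebleu_refs = [["ref A for pred 1", "ref A for pred 2"], ["ref B for pred 1", "ref B for pred 2"]]
per_prediction_refs = [list(refs) for refs in zip(*sacrebleu_refs)]
# per_prediction_refs == [['ref A for pred 1', 'ref B for pred 1'],
#                         ['ref A for pred 2', 'ref B for pred 2']]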
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
17
1
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" ,"False" ) ) is not True ,reason="Skipping test because should only be run when releasing minor transformers version" ,) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9}, }, { "framework": "tensorflow", "script": "run_tf.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9}, }, ] ) class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: if self.framework == "pytorch": subprocess.run( F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=__lowerCamelCase , ) assert hasattr(self , "env" ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : Optional[Any]=1 ) -> Union[str, Any]: # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=__lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=__lowerCamelCase , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , ) def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : str ) -> Optional[int]: TrainingJobAnalytics(__lowerCamelCase ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Any: # create estimator A : Optional[Any] = self.create_estimator() # run training estimator.fit() # result dataframe A : Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis A : Tuple = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] ) A : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping A : List[Any] = ( Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 99_99_99 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy ) assert all(t <= self.results["eval_loss"] for t in eval_loss ) # dump tests result into json file to share in PR with open(F"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __lowerCamelCase )
17
import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging __SCREAMING_SNAKE_CASE = ( """https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py""" ) __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name def UpperCAmelCase ( ): A : Union[str, Any] = "https://pypi.org/pypi/diffusers/json" A : List[Any] = json.loads(request.urlopen(_lowerCamelCase ).read() )["releases"].keys() return sorted(_lowerCamelCase , key=lambda _lowerCamelCase : version.Version(_lowerCamelCase ) ) def UpperCAmelCase ( ): # This function has already been executed if HF_MODULES_CACHE already is in the Python path. if HF_MODULES_CACHE in sys.path: return sys.path.append(_lowerCamelCase ) os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) A : List[Any] = Path(_lowerCamelCase ) / "__init__.py" if not init_path.exists(): init_path.touch() def UpperCAmelCase ( _lowerCamelCase ): init_hf_modules() A : Tuple = Path(_lowerCamelCase ) / name # If the parent module does not exist yet, recursively create it. if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) A : Optional[int] = dynamic_module_path / "__init__.py" if not init_path.exists(): init_path.touch() def UpperCAmelCase ( _lowerCamelCase ): with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f: A : Union[str, Any] = f.read() # Imports of the form `import .xxx` A : Union[str, Any] = re.findall("^\s*import\s+\.(\S+)\s*$" , _lowerCamelCase , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import" , _lowerCamelCase , flags=re.MULTILINE ) # Unique-ify return list(set(_lowerCamelCase ) ) def UpperCAmelCase ( _lowerCamelCase ): A : Optional[int] = False A : Tuple = [module_file] A : Optional[int] = [] # Let's recurse through all relative imports while not no_change: A : Optional[Any] = [] for f in files_to_check: new_imports.extend(get_relative_imports(_lowerCamelCase ) ) A : Optional[Any] = Path(_lowerCamelCase ).parent A : List[str] = [str(module_path / m ) for m in new_imports] A : Optional[Any] = [f for f in new_import_files if f not in all_relative_imports] A : Union[str, Any] = [f"""{f}.py""" for f in new_import_files] A : Tuple = len(_lowerCamelCase ) == 0 all_relative_imports.extend(_lowerCamelCase ) return all_relative_imports def UpperCAmelCase ( _lowerCamelCase ): with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f: A : Dict = f.read() # Imports of the form `import xxx` A : List[str] = re.findall("^\s*import\s+(\S+)\s*$" , _lowerCamelCase , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall("^\s*from\s+(\S+)\s+import" , _lowerCamelCase , flags=re.MULTILINE ) # Only keep the top-level module A : Optional[int] = [imp.split("." )[0] for imp in imports if not imp.startswith("." 
)] # Unique-ify and test we got them all A : Any = list(set(_lowerCamelCase ) ) A : Tuple = [] for imp in imports: try: importlib.import_module(_lowerCamelCase ) except ImportError: missing_packages.append(_lowerCamelCase ) if len(_lowerCamelCase ) > 0: raise ImportError( "This modeling file requires the following packages that were not found in your environment: " f"""{", ".join(_lowerCamelCase )}. Run `pip install {" ".join(_lowerCamelCase )}`""" ) return get_relative_imports(_lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : int = module_path.replace(os.path.sep , "." ) A : Optional[Any] = importlib.import_module(_lowerCamelCase ) if class_name is None: return find_pipeline_class(_lowerCamelCase ) return getattr(_lowerCamelCase , _lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase ): from ..pipelines import DiffusionPipeline A : int = dict(inspect.getmembers(_lowerCamelCase , inspect.isclass ) ) A : Union[str, Any] = None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , _lowerCamelCase ) and cls.__module__.split("." )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:""" f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in""" f""" {loaded_module}.""" ) A : Any = cls return pipeline_class def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , ): A : List[Any] = str(_lowerCamelCase ) A : Any = os.path.join(_lowerCamelCase , _lowerCamelCase ) if os.path.isfile(_lowerCamelCase ): A : Union[str, Any] = module_file_or_url A : Any = "local" elif pretrained_model_name_or_path.count("/" ) == 0: A : Optional[Any] = get_diffusers_versions() # cut ".dev0" A : Union[str, Any] = "v" + ".".join(__version__.split("." )[:3] ) # retrieve github version that matches if revision is None: A : List[Any] = latest_version if latest_version[1:] in available_versions else "main" logger.info(f"""Defaulting to latest_version: {revision}.""" ) elif revision in available_versions: A : Optional[Any] = f"""v{revision}""" elif revision == "main": A : Dict = revision else: raise ValueError( f"""`custom_revision`: {revision} does not exist. 
Please make sure to choose one of""" f""" {", ".join(available_versions + ["main"] )}.""" ) # community pipeline on GitHub A : Dict = COMMUNITY_PIPELINES_URL.format(revision=_lowerCamelCase , pipeline=_lowerCamelCase ) try: A : Optional[int] = cached_download( _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , ) A : Optional[Any] = "git" A : Any = pretrained_model_name_or_path + ".py" except EnvironmentError: logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" ) raise else: try: # Load from URL or cache if already cached A : Any = hf_hub_download( _lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , ) A : Optional[Any] = os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) ) except EnvironmentError: logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" ) raise # Check we have all the requirements in our environment A : List[str] = check_imports(_lowerCamelCase ) # Now we move the module inside our cached dynamic modules. A : List[str] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(_lowerCamelCase ) A : Optional[int] = Path(_lowerCamelCase ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(_lowerCamelCase , submodule_path / module_file ) for module_needed in modules_needed: A : int = f"""{module_needed}.py""" shutil.copy(os.path.join(_lowerCamelCase , _lowerCamelCase ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(_lowerCamelCase , _lowerCamelCase ): A : Optional[Any] = use_auth_token elif use_auth_token is True: A : Dict = HfFolder.get_token() else: A : Tuple = None A : List[str] = model_info(_lowerCamelCase , revision=_lowerCamelCase , token=_lowerCamelCase ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. 
A : str = submodule_path / commit_hash A : List[str] = full_submodule + os.path.sep + commit_hash create_dynamic_module(_lowerCamelCase ) if not (submodule_path / module_file).exists(): shutil.copy(_lowerCamelCase , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( _lowerCamelCase , f"""{module_needed}.py""" , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , ) return os.path.join(_lowerCamelCase , _lowerCamelCase ) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , **_lowerCamelCase , ): A : int = get_cached_module_file( _lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , ) return get_class_in_module(_lowerCamelCase , final_module.replace(".py" , "" ) )
17
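# Hedged end-to-end sketch of the loader above (named get_class_from_dynamic_module in
# diffusers): fetch a community pipeline file, cache it under HF_MODULES_CACHE, and import
# its single DiffusionPipeline subclass. The pipeline name here is illustrative; in
# practice this path is driven by DiffusionPipeline.from_pretrained(..., custom_pipeline=...).
pipeline_class = get_class_from_dynamic_module(
    "clip_guided_stable_diffusion",  # community pipeline on GitHub (illustrative)
    module_file="clip_guided_stable_diffusion.py",
)
print(pipeline_class.__name__)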
1
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(items, max_cost, key_func):
    items_copy = sorted(items, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
17
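# Worked example for the greedy helpers above: items are sorted by value/weight density
# (Pizza 10.0, Coca Cola 3.0, Burger 2.0, Rice 1.0) and taken while they fit the budget.
food = ["Burger", "Pizza", "Coca Cola", "Rice"]
value = [80, 100, 60, 70]
weight = [40, 10, 20, 70]
menu = build_menu(food, value, weight)
chosen, total_value = greedy(menu, 60, Things.value_weight)
print(chosen, total_value)  # [Pizza, Coca Cola] worth 160.0; Burger (weight 40) would exceed 60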
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed __SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""): from run_translation import main # noqa set_seed(42) __SCREAMING_SNAKE_CASE = """sshleifer/student_marian_en_ro_6_1""" __SCREAMING_SNAKE_CASE = """sshleifer/tiny-mbart""" @require_torch class lowerCamelCase_ ( _A ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : str=None , __lowerCamelCase : List[str]=True , __lowerCamelCase : str=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Optional[int]=True , ) -> Dict: A : str = self.run_trainer( eval_steps=1 , max_len=12 , model_name=__lowerCamelCase , num_train_epochs=1 , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , predict_with_generate=__lowerCamelCase , do_train=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , ) A : Dict = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history if not do_eval: return A : List[Any] = [log for log in logs if "eval_loss" in log.keys()] A : Any = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats A : List[str] = eval_metrics[-1] assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase ) assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: self.run_seqaseq_quick() @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: self.run_seqaseq_quick(distributed=__lowerCamelCase ) @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str: self.run_seqaseq_quick(distributed=__lowerCamelCase ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]: self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple" ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> str: self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple --fp16" ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=__lowerCamelCase ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: self.run_seqaseq_quick( distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , 
predict_with_generate=__lowerCamelCase ) @require_apex @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict: # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" ) @parameterized.expand(["base", "low", "high", "mixed"] ) @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : List[str] ) -> Tuple: # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout A : Dict = { # test with the default log_level - should be info and thus log info once "base": {"extra_args_str": "", "n_matches": 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1}, # test with high log_level and log_level_replica - should be quiet on all processes "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0}, } A : List[str] = experiments[experiment_id] A : Union[str, Any] = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False} A : Union[str, Any] = "Running training" with CaptureStderr() as cl: self.run_seqaseq_quick(**__lowerCamelCase , extra_args_str=data["extra_args_str"] ) A : Dict = len(re.findall(__lowerCamelCase , cl.err ) ) self.assertEqual(__lowerCamelCase , data["n_matches"] ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: A : int = self.run_trainer( eval_steps=2 , max_len=1_28 , model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=10 , distributed=__lowerCamelCase , ) # Check metrics A : str = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history A : Dict = [log for log in logs if "eval_loss" in log.keys()] A : Dict = eval_metrics[0] A : int = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase ) # test if do_predict saves generations and metrics A : Optional[Any] = os.listdir(__lowerCamelCase ) A : Any = {os.path.basename(__lowerCamelCase ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]: from transformers.training_args import OptimizerNames def train_and_return_metrics(__lowerCamelCase : str ) -> Tuple[int, float]: A : Optional[int] = "--skip_memory_metrics 0" A : str = self.run_trainer( max_len=1_28 , 
model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=1 , optim=__lowerCamelCase , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , n_gpus_to_use=1 , ) # Check metrics A : Union[str, Any] = TrainerState.load_from_json(Path(__lowerCamelCase , "trainer_state.json" ) ).log_history A : str = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 ) A : List[Any] = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 ) A : int = logs[0]["train_loss"] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss A , A , A : int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) A , A , A : Optional[int] = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) A : Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb A : Dict = gpu_peak_mem_orig + gpu_alloc_mem_orig A : Dict = gpu_peak_mem_bnb + gpu_alloc_mem_bnb A : int = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings A : Tuple = 1_20 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( __lowerCamelCase , __lowerCamelCase , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got" F""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and""" F""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , ) self.assertGreater( __lowerCamelCase , __lowerCamelCase , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got" F""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and""" F""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , ) self.assertEqual( __lowerCamelCase , __lowerCamelCase , F"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" ) def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : float = 3e-3 , __lowerCamelCase : str = "adafactor" , __lowerCamelCase : bool = False , __lowerCamelCase : str = None , __lowerCamelCase : int = 0 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : int = None , ) -> List[str]: A : Optional[int] = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro" A : Optional[int] = self.get_auto_remove_tmp_dir() A : int = F""" --model_name_or_path {model_name} --train_file {data_dir}/train.json 
--validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(__lowerCamelCase )} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(__lowerCamelCase )} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX """.split() A : Optional[Any] = F""" --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(__lowerCamelCase )} """.split() A : Optional[Any] = "\n --do_predict\n ".split() A : Optional[int] = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F"""--optim {optim}""".split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: A : Dict = get_gpu_count() A : Any = get_torch_dist_unique_port() A : Optional[Any] = F""" -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py """.split() A : Any = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__lowerCamelCase , env=self.get_env() ) else: A : List[Any] = ["run_translation.py"] + args with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ): main() return output_dir
17
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
17
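# Usage sketch for the configuration above: build a deliberately small RWKV model from a
# custom config (assumes a transformers version that ships RWKV; weights are random here).
from transformers import RwkvConfig, RwkvModel

config = RwkvConfig(context_length=2048, hidden_size=512, num_hidden_layers=4)
model = RwkvModel(config)
print(config.attention_hidden_size, config.intermediate_size)  # 512 and 2048 via the defaults above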
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
17
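# Worked example for max_subsequence_sum above: a "subsequence" need not be contiguous,
# so the best choice is simply every positive element: 1 + 4 + 2 + 1 + 4 == 12.
assert max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 12
assert max_subsequence_sum([-3, -2, -5]) == -2  # all-negative input: take the single largest element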
1
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
17
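# Hedged sketch of aligning the template above with a concrete dataset schema
# (assumes the `datasets` library; the label names are illustrative).
from datasets import ClassLabel, Features, Image

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
task = ImageClassification(image_column="image", label_column="labels")
task = task.align_with_features(features)
print(task.column_mapping)  # {'image': 'image', 'labels': 'labels'}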
from math import sqrt


def solution(limit: int = 1_000_000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
17
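# Sanity check of the geometry behind solution(): for a cuboid with sides (a, b, c), the
# shortest corner-to-corner route across the surface unfolds into the hypotenuse
# sqrt((a + b)**2 + c**2). In the loop above, sum_shortest_sides plays the role of a + b
# and max_cuboid_size the longest side c; the classic (3, 5, 6) cuboid gives exactly 10.
from math import sqrt

assert sqrt((3 + 5) ** 2 + 6**2) == 10.0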
1
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n_digits: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n_digits):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
17
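# Worked example for is_digit_cancelling above: 49/98 == 4/8, i.e. naively "cancelling"
# the shared digit 9 happens to preserve the value, which is what the predicate detects.
assert is_digit_cancelling(49, 98)
assert not is_digit_cancelling(30, 50)  # trivial zero-cancelling fractions don't qualify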
import os

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
17
1
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
17
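# Worked example for minimax above: a complete binary tree of height log2(4) = 2 with
# leaf scores [3, 5, 2, 9]. The maximizer picks the better of the two minimizer results,
# min(3, 5) = 3 and min(2, 9) = 2, so the optimal value is 3.
import math

demo_scores = [3, 5, 2, 9]
assert minimax(0, 0, True, demo_scores, math.log(len(demo_scores), 2)) == 3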
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class lowerCamelCase_ : '''simple docstring''' def __init__( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Tuple=13 , __lowerCamelCase : List[str]=30 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : Any=3 , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : int=32 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Any=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=2 , ) -> str: A : List[Any] = parent A : Optional[int] = batch_size A : Any = image_size A : Optional[Any] = patch_size A : Optional[Any] = num_channels A : Tuple = is_training A : Optional[Any] = use_labels A : Union[str, Any] = hidden_size A : Tuple = num_hidden_layers A : Union[str, Any] = num_attention_heads A : Union[str, Any] = intermediate_size A : Any = hidden_act A : Tuple = hidden_dropout_prob A : Dict = attention_probs_dropout_prob A : Any = type_sequence_label_size A : Tuple = initializer_range A : List[Any] = scope A : Optional[int] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) A : List[str] = (image_size // patch_size) ** 2 A : List[str] = num_patches + 2 def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int: A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : List[Any] = None if self.use_labels: A : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Dict = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str] ) -> int: A : Optional[int] 
= DeiTModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : List[Any] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Any ) -> Any: A : List[Any] = DeiTForMaskedImageModeling(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A : List[str] = 1 A : Optional[int] = DeiTForMaskedImageModeling(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : int ) -> Dict: A : str = self.type_sequence_label_size A : List[str] = DeiTForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A : Any = 1 A : str = DeiTForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: A : Dict = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ) : Tuple = config_and_inputs A : int = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) a__ = ( { "feature-extraction": DeiTModel, "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]: A : str = DeiTModelTester(self ) A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict: self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int: pass def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: A , A : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Dict = model_class(__lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]: A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Union[str, Any] 
= model_class(__lowerCamelCase ) A : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Any = [*signature.parameters.keys()] A : Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any: A : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any]=False ) -> str: A : Union[str, Any] = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: if not self.model_tester.is_training: return A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() A : Dict = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(__lowerCamelCase ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue A : Union[str, Any] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Dict = model(**__lowerCamelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return A : Tuple = False A : Any = True for model_class in self.all_model_classes: if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue A : List[str] = model_class(__lowerCamelCase ) model.gradient_checkpointing_enable() model.to(__lowerCamelCase ) model.train() A : Dict = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Tuple = model(**__lowerCamelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() A : int = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(__lowerCamelCase ), *get_values(__lowerCamelCase ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ): A : Tuple = problem_type["title"] A : Optional[Any] = 
problem_type["num_labels"] A : List[str] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if problem_type["num_labels"] > 1: A : List[str] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) A : int = inputs["labels"].to(problem_type["dtype"] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=__lowerCamelCase ) as warning_list: A : Optional[Any] = model(**__lowerCamelCase ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Optional[Any] = DeiTModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: A : Optional[int] = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to( __lowerCamelCase ) A : List[Any] = self.default_image_processor A : List[Any] = prepare_img() A : Optional[Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): A : List[str] = model(**__lowerCamelCase ) # verify the logits A : str = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : List[str] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: A : str = DeiTModel.from_pretrained( "facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" ) A : Dict = self.default_image_processor A : Optional[int] = prepare_img() A : Union[str, Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ) A : Union[str, Any] = inputs.pixel_values.to(__lowerCamelCase ) # forward pass to make sure inference works in fp16 with torch.no_grad(): A : List[str] = model(__lowerCamelCase )
17
1
import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py __SCREAMING_SNAKE_CASE = """src/diffusers""" __SCREAMING_SNAKE_CASE = """.""" # This is to make sure the diffusers module imported is the one in the repo. __SCREAMING_SNAKE_CASE = importlib.util.spec_from_file_location( """diffusers""", os.path.join(DIFFUSERS_PATH, """__init__.py"""), submodule_search_locations=[DIFFUSERS_PATH], ) __SCREAMING_SNAKE_CASE = spec.loader.load_module() def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): return line.startswith(_lowerCamelCase ) or len(_lowerCamelCase ) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$" , _lowerCamelCase ) is not None def UpperCAmelCase ( _lowerCamelCase ): A : Dict = object_name.split("." ) A : Tuple = 0 # First let's find the module where our object lives. A : Tuple = parts[i] while i < len(_lowerCamelCase ) and not os.path.isfile(os.path.join(_lowerCamelCase , f"""{module}.py""" ) ): i += 1 if i < len(_lowerCamelCase ): A : int = os.path.join(_lowerCamelCase , parts[i] ) if i >= len(_lowerCamelCase ): raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" ) with open(os.path.join(_lowerCamelCase , f"""{module}.py""" ) , "r" , encoding="utf-8" , newline="\n" ) as f: A : Tuple = f.readlines() # Now let's find the class / func in the code! A : Any = "" A : Union[str, Any] = 0 for name in parts[i + 1 :]: while ( line_index < len(_lowerCamelCase ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(_lowerCamelCase ): raise ValueError(f""" {object_name} does not match any function or class in {module}.""" ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). A : Optional[int] = line_index while line_index < len(_lowerCamelCase ) and _should_continue(lines[line_index] , _lowerCamelCase ): line_index += 1 # Clean up empty lines at the end (if any). 
while len(lines[line_index - 1] ) <= 1: line_index -= 1 A : int = lines[start_index:line_index] return "".join(_lowerCamelCase ) __SCREAMING_SNAKE_CASE = re.compile(r"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""") __SCREAMING_SNAKE_CASE = re.compile(r"""^\s*(\S+)->(\S+)(\s+.*|$)""") __SCREAMING_SNAKE_CASE = re.compile(r"""<FILL\s+[^>]*>""") def UpperCAmelCase ( _lowerCamelCase ): A : List[str] = code.split("\n" ) A : List[str] = 0 while idx < len(_lowerCamelCase ) and len(lines[idx] ) == 0: idx += 1 if idx < len(_lowerCamelCase ): return re.search(R"^(\s*)\S" , lines[idx] ).groups()[0] return "" def UpperCAmelCase ( _lowerCamelCase ): A : List[Any] = len(get_indent(_lowerCamelCase ) ) > 0 if has_indent: A : Tuple = f"""class Bla:\n{code}""" A : int = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=_lowerCamelCase ) A : Any = black.format_str(_lowerCamelCase , mode=_lowerCamelCase ) A , A : Dict = style_docstrings_in_code(_lowerCamelCase ) return result[len("class Bla:\n" ) :] if has_indent else result def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase=False ): with open(_lowerCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f: A : Dict = f.readlines() A : List[Any] = [] A : List[str] = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(_lowerCamelCase ): A : Union[str, Any] = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. A , A , A : Optional[int] = search.groups() A : str = find_code_in_diffusers(_lowerCamelCase ) A : Optional[int] = get_indent(_lowerCamelCase ) A : int = line_index + 1 if indent == theoretical_indent else line_index + 2 A : Union[str, Any] = theoretical_indent A : Dict = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. A : Dict = True while line_index < len(_lowerCamelCase ) and should_continue: line_index += 1 if line_index >= len(_lowerCamelCase ): break A : Dict = lines[line_index] A : Union[str, Any] = _should_continue(_lowerCamelCase , _lowerCamelCase ) and re.search(f"""^{indent}# End copy""" , _lowerCamelCase ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 A : str = lines[start_index:line_index] A : str = "".join(_lowerCamelCase ) # Remove any nested `Copied from` comments to avoid circular copies A : Any = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(_lowerCamelCase ) is None] A : Any = "\n".join(_lowerCamelCase ) # Before comparing, use the `replace_pattern` on the original code. if len(_lowerCamelCase ) > 0: A : int = replace_pattern.replace("with" , "" ).split("," ) A : Optional[int] = [_re_replace_pattern.search(_lowerCamelCase ) for p in patterns] for pattern in patterns: if pattern is None: continue A , A , A : List[str] = pattern.groups() A : str = re.sub(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) if option.strip() == "all-casing": A : Optional[int] = re.sub(obja.lower() , obja.lower() , _lowerCamelCase ) A : List[str] = re.sub(obja.upper() , obja.upper() , _lowerCamelCase ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line A : Optional[int] = blackify(lines[start_index - 1] + theoretical_code ) A : Dict = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. 
if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: A : int = lines[:start_index] + [theoretical_code] + lines[line_index:] A : Optional[int] = start_index + 1 if overwrite and len(_lowerCamelCase ) > 0: # Warn the user a file has been modified. print(f"""Detected changes, rewriting {filename}.""" ) with open(_lowerCamelCase , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(_lowerCamelCase ) return diffs def UpperCAmelCase ( _lowerCamelCase = False ): A : List[Any] = glob.glob(os.path.join(_lowerCamelCase , "**/*.py" ) , recursive=_lowerCamelCase ) A : Any = [] for filename in all_files: A : Dict = is_copy_consistent(_lowerCamelCase , _lowerCamelCase ) diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs] if not overwrite and len(_lowerCamelCase ) > 0: A : List[str] = "\n".join(_lowerCamelCase ) raise Exception( "Found the following copy inconsistencies:\n" + diff + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") __SCREAMING_SNAKE_CASE = parser.parse_args() check_copies(args.fix_and_overwrite)
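As a quick illustration of the comment format this checker consumes, here is a minimal, self-contained sketch; the DDPM/DDIM scheduler names in the example line are hypothetical placeholders, not a claim about the real codebase.

import re

# Minimal sketch of the "Copied from" format parsed by the checker above; the
# example line and its class names are placeholders.
_re_copy = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
line = "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.step with DDPM->DDIM"
match = _re_copy.search(line)
indent, object_name, replace_pattern = match.groups()
print(object_name)      # schedulers.scheduling_ddpm.DDPMScheduler.step
print(replace_pattern)  # with DDPM->DDIM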
17
from sklearn.metrics import recall_score import datasets __SCREAMING_SNAKE_CASE = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ __SCREAMING_SNAKE_CASE = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`. - **zero_division** (`string` or `int`): Sets the value to return when there is a zero division. Defaults to `'warn'`. - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ __SCREAMING_SNAKE_CASE = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("int32" ) ), "references": datasets.Sequence(datasets.Value("int32" ) ), } if self.config_name == "multilabel" else { "predictions": datasets.Value("int32" ), "references": datasets.Value("int32" ), } ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , ) def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Tuple="binary" , __lowerCamelCase : Tuple=None , __lowerCamelCase : Tuple="warn" , ) -> Optional[Any]: A : str = recall_score( __lowerCamelCase , __lowerCamelCase , labels=__lowerCamelCase , pos_label=__lowerCamelCase , average=__lowerCamelCase , sample_weight=__lowerCamelCase , zero_division=__lowerCamelCase , ) return {"recall": float(__lowerCamelCase ) if score.size == 1 else score}
17
1
import datasets __SCREAMING_SNAKE_CASE = """\ @InProceedings{conneau2018xnli, author = \"Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin\", title = \"XNLI: Evaluating Cross-lingual Sentence Representations\", booktitle = \"Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing\", year = \"2018\", publisher = \"Association for Computational Linguistics\", location = \"Brussels, Belgium\", } """ __SCREAMING_SNAKE_CASE = """\ XNLI is a subset of a few thousand examples from MNLI which has been translated into 14 different languages (some low-ish resource). As with MNLI, the goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B) and is a classification task (given two sentences, predict one of three labels). """ __SCREAMING_SNAKE_CASE = """ Computes XNLI score which is just simple accuracy. Args: predictions: Predicted labels. references: Ground truth labels. Returns: 'accuracy': accuracy Examples: >>> predictions = [0, 1] >>> references = [0, 1] >>> xnli_metric = datasets.load_metric(\"xnli\") >>> results = xnli_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0} """ def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), } ) , codebase_urls=[] , reference_urls=[] , format="numpy" , ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] ) -> Optional[Any]: return {"accuracy": simple_accuracy(__lowerCamelCase , __lowerCamelCase )}
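The accuracy helper above is just an element-wise comparison followed by a mean; a self-contained sketch with NumPy arrays makes the computation explicit:

import numpy as np

# The simple-accuracy computation above, spelled out on concrete arrays.
preds = np.array([0, 1, 1])
labels = np.array([0, 1, 0])
print((preds == labels).mean())  # 0.6666666666666666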
17
from collections import deque from .hash_table import HashTable class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : Union[str, Any] , *__lowerCamelCase : Dict , **__lowerCamelCase : int ) -> Optional[int]: super().__init__(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Union[str, Any]: A : Optional[Any] = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(__lowerCamelCase ) A : Dict = self.values[key] def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: return ( sum(self.charge_factor - len(__lowerCamelCase ) for slot in self.values ) / self.size_table * self.charge_factor ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : List[Any]=None ) -> Optional[int]: if not ( len(self.values[key] ) == self.charge_factor and self.values.count(__lowerCamelCase ) == 0 ): return key return super()._collision_resolution(__lowerCamelCase , __lowerCamelCase )
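A self-contained sketch of the separate-chaining idea the class above implements: each slot holds a deque and new values are pushed onto the left of the chain (the slot keys here are purely illustrative):

from collections import deque

# Separate chaining in miniature: one deque per slot, newest value first.
table = {}

def insert(key, value):
    table.setdefault(key, deque()).appendleft(value)

insert(0, "a")
insert(0, "b")
print(table[0])  # deque(['b', 'a'])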
17
1
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Sequence, Value from .base import TaskTemplate @dataclass(frozen=_A ) class lowerCamelCase_ ( _A ): '''simple docstring''' # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization a__ = field(default="question-answering-extractive" ,metadata={"include_in_asdict_even_if_is_default": True} ) a__ = Features({"question": Value("string" ), "context": Value("string" )} ) a__ = Features( { "answers": Sequence( { "text": Value("string" ), "answer_start": Value("int32" ), } ) } ) a__ = "question" a__ = "context" a__ = "answers" @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict[str, str]: return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
17
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class lowerCamelCase_ : '''simple docstring''' @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str: return self.get_dummy_input() @property def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : int=False , __lowerCamelCase : int=False , __lowerCamelCase : Optional[int]=False , ) -> Dict: A : Optional[Any] = 4 A : List[str] = 32 A : Any = (32, 32) A : str = torch.manual_seed(0 ) A : int = torch.device(__lowerCamelCase ) A : List[str] = (batch_size, num_channels) + sizes A : Dict = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase ) A : int = {"hidden_states": hidden_states} if include_temb: A : Any = 1_28 A : List[str] = randn_tensor((batch_size, temb_channels) , generator=__lowerCamelCase , device=__lowerCamelCase ) if include_res_hidden_states_tuple: A : str = torch.manual_seed(1 ) A : Tuple = (randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase ),) if include_encoder_hidden_states: A : Dict = floats_tensor((batch_size, 32, 32) ).to(__lowerCamelCase ) if include_skip_sample: A : Optional[int] = randn_tensor(((batch_size, 3) + sizes) , generator=__lowerCamelCase , device=__lowerCamelCase ) return dummy_input def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]: A : Dict = { "in_channels": 32, "out_channels": 32, "temb_channels": 1_28, } if self.block_type == "up": A : Dict = 32 if self.block_type == "mid": init_dict.pop("out_channels" ) A : str = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Optional[int] ) -> Union[str, Any]: A , A : str = self.prepare_init_args_and_inputs_for_common() A : List[Any] = self.block_class(**__lowerCamelCase ) unet_block.to(__lowerCamelCase ) unet_block.eval() with torch.no_grad(): A : int = unet_block(**__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): A : Union[str, Any] = output[0] self.assertEqual(output.shape , self.output_shape ) A : Any = output[0, -1, -3:, -3:] A : Union[str, Any] = torch.tensor(__lowerCamelCase ).to(__lowerCamelCase ) assert torch_all_close(output_slice.flatten() , __lowerCamelCase , atol=5e-3 ) @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: A , A : Tuple = self.prepare_init_args_and_inputs_for_common() A : str = self.block_class(**__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : Optional[int] = model(**__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): A : Optional[Any] = output[0] A : List[str] = torch.device(__lowerCamelCase ) A : List[str] = randn_tensor(output.shape , device=__lowerCamelCase ) A : Dict = torch.nn.functional.mse_loss(__lowerCamelCase , __lowerCamelCase ) loss.backward()
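The seeding pattern that `get_dummy_input` relies on can be sketched with plain torch (`randn_tensor` is a diffusers-internal helper; the shape values mirror the defaults above):

import torch

# Reproducible dummy input in the spirit of get_dummy_input above, using plain
# torch.randn with an explicit generator for determinism.
generator = torch.manual_seed(0)
hidden_states = torch.randn((4, 32, 32, 32), generator=generator)
print(hidden_states.shape)  # torch.Size([4, 32, 32, 32])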
17
1
from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = { """google/vivit-b-16x2-kinetics400""": ( """https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json""" ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = "vivit" def __init__( self : str , __lowerCamelCase : Dict=2_24 , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : int=[2, 16, 16] , __lowerCamelCase : Dict=3 , __lowerCamelCase : Dict=7_68 , __lowerCamelCase : Dict=12 , __lowerCamelCase : Tuple=12 , __lowerCamelCase : Any=30_72 , __lowerCamelCase : List[str]="gelu_fast" , __lowerCamelCase : Optional[Any]=0.0 , __lowerCamelCase : Tuple=0.0 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Optional[Any]=1e-06 , __lowerCamelCase : Union[str, Any]=True , **__lowerCamelCase : str , ) -> List[str]: A : Union[str, Any] = hidden_size A : str = num_hidden_layers A : int = num_attention_heads A : str = intermediate_size A : Tuple = hidden_act A : Union[str, Any] = hidden_dropout_prob A : Optional[int] = attention_probs_dropout_prob A : int = initializer_range A : List[Any] = layer_norm_eps A : Optional[Any] = image_size A : Union[str, Any] = num_frames A : Any = tubelet_size A : int = num_channels A : List[str] = qkv_bias super().__init__(**__lowerCamelCase )
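The defaults above imply a fixed token count per video clip; a worked example of that arithmetic, assuming the standard ViViT tubelet tokenization:

# Token-count arithmetic implied by the defaults above: a 32-frame 224x224 clip
# split into [2, 16, 16] (time, height, width) tubelets.
num_frames, image_size = 32, 224
t, h, w = 2, 16, 16
num_tubelets = (num_frames // t) * (image_size // h) * (image_size // w)
print(num_tubelets)  # 16 * 14 * 14 = 3136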
17
import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs (`Dict[str, Any]`, *optional*): Additional stopping criteria specific kwargs. Return: `bool`. `False` indicates we should continue, `True` indicates we should stop. """ class lowerCamelCase_ ( _A ): '''simple docstring''' @add_start_docstrings(__lowerCamelCase ) def __call__( self : Optional[int] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Optional[Any] ) -> bool: raise NotImplementedError("StoppingCriteria needs to be subclassed" ) class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] = None ) -> List[Any]: A : str = max_length A : Optional[int] = max_position_embeddings @add_start_docstrings(__lowerCamelCase ) def __call__( self : List[str] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Any ) -> bool: A : List[Any] = input_ids.shape[-1] A : Any = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( "This is a friendly reminder - the current text generation call will exceed the model's predefined " F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """ "exceptions, performance degradation, or nothing at all." ) return is_done class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : int ) -> List[Any]: warnings.warn( "The class `MaxNewTokensCriteria` is deprecated. " F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """ "with `max_length = start_length + max_new_tokens` instead." 
, __lowerCamelCase , ) A : str = start_length A : Optional[Any] = max_new_tokens A : Dict = start_length + max_new_tokens @add_start_docstrings(__lowerCamelCase ) def __call__( self : int , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool: return input_ids.shape[-1] >= self.max_length class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : Optional[int] , __lowerCamelCase : float , __lowerCamelCase : Optional[float] = None ) -> List[Any]: A : str = max_time A : Dict = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(__lowerCamelCase ) def __call__( self : Any , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool: return time.time() - self.initial_timestamp > self.max_time class lowerCamelCase_ ( _A ): '''simple docstring''' @add_start_docstrings(__lowerCamelCase ) def __call__( self : Union[str, Any] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : int ) -> bool: return any(criteria(__lowerCamelCase , __lowerCamelCase ) for criteria in self ) @property def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]: for stopping_criterium in self: if isinstance(__lowerCamelCase , __lowerCamelCase ): return stopping_criterium.max_length elif isinstance(__lowerCamelCase , __lowerCamelCase ): return stopping_criterium.max_length return None def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : Optional[int] = stopping_criteria.max_length A : Any = deepcopy(_lowerCamelCase ) if stopping_max_length is not None and stopping_max_length != max_length: warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , _lowerCamelCase ) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=_lowerCamelCase ) ) return new_stopping_criteria
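The list class above stops generation as soon as any criterion fires; a minimal sketch of that `any(...)` semantics with two toy criteria (the thresholds are illustrative):

# Minimal sketch of the any()-combination used by the criteria list above.
def max_len_done(cur_len, max_length=8):
    return cur_len >= max_length

def max_time_done(elapsed, max_time=60.0):
    return elapsed > max_time

criteria_results = [max_len_done(10), max_time_done(1.5)]
print(any(criteria_results))  # True: the length criterion already fired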
17
1
import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): A : Any = os.path.abspath(_lowerCamelCase ) logger.info(f"""Converting TensorFlow checkpoint from {tf_path}""" ) # Load weights from TF model A : Any = tf.train.list_variables(_lowerCamelCase ) A : Optional[int] = [] A : Tuple = [] A : List[Any] = [] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") A : Tuple = full_name.split("/" ) if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(f"""Skipping non-model layer {full_name}""" ) continue if "optimizer" in full_name: logger.info(f"""Skipping optimization layer {full_name}""" ) continue if name[0] == "model": # ignore initial 'model' A : Tuple = name[1:] # figure out how many levels deep the name is A : Tuple = 0 for _name in name: if _name.startswith("layer_with_weights" ): depth += 1 else: break layer_depth.append(_lowerCamelCase ) # read data A : Union[str, Any] = tf.train.load_variable(_lowerCamelCase , _lowerCamelCase ) names.append("/".join(_lowerCamelCase ) ) arrays.append(_lowerCamelCase ) logger.info(f"""Read a total of {len(_lowerCamelCase ):,} layers""" ) # Sanity check if len(set(_lowerCamelCase ) ) != 1: raise ValueError(f"""Found layer names with different depths (layer depth {list(set(_lowerCamelCase ) )})""" ) A : Tuple = list(set(_lowerCamelCase ) )[0] if layer_depth != 1: raise ValueError( "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP" " heads." ) # convert layers logger.info("Converting weights..." 
) for full_name, array in zip(_lowerCamelCase , _lowerCamelCase ): A : Dict = full_name.split("/" ) A : Any = model A : List[str] = [] for i, m_name in enumerate(_lowerCamelCase ): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith("layer_with_weights" ): A : Optional[int] = int(m_name.split("-" )[-1] ) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(["embeddings", "LayerNorm"] ) A : Dict = getattr(_lowerCamelCase , "embeddings" ) A : List[str] = getattr(_lowerCamelCase , "LayerNorm" ) elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(["encoder", "layer", str(layer_num - 4 )] ) A : Any = getattr(_lowerCamelCase , "encoder" ) A : str = getattr(_lowerCamelCase , "layer" ) A : Union[str, Any] = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(["pooler", "dense"] ) A : Optional[Any] = getattr(_lowerCamelCase , "pooler" ) A : Optional[Any] = getattr(_lowerCamelCase , "dense" ) elif m_name == "embeddings": trace.append("embeddings" ) A : Dict = getattr(_lowerCamelCase , "embeddings" ) if layer_num == 0: trace.append("word_embeddings" ) A : Dict = getattr(_lowerCamelCase , "word_embeddings" ) elif layer_num == 1: trace.append("position_embeddings" ) A : str = getattr(_lowerCamelCase , "position_embeddings" ) elif layer_num == 2: trace.append("token_type_embeddings" ) A : Optional[int] = getattr(_lowerCamelCase , "token_type_embeddings" ) else: raise ValueError(f"""Unknown embedding layer with name {full_name}""" ) trace.append("weight" ) A : Tuple = getattr(_lowerCamelCase , "weight" ) elif m_name == "_attention_layer": # self-attention layer trace.extend(["attention", "self"] ) A : int = getattr(_lowerCamelCase , "attention" ) A : Union[str, Any] = getattr(_lowerCamelCase , "self" ) elif m_name == "_attention_layer_norm": # output attention norm trace.extend(["attention", "output", "LayerNorm"] ) A : Union[str, Any] = getattr(_lowerCamelCase , "attention" ) A : Union[str, Any] = getattr(_lowerCamelCase , "output" ) A : Any = getattr(_lowerCamelCase , "LayerNorm" ) elif m_name == "_attention_output_dense": # output attention dense trace.extend(["attention", "output", "dense"] ) A : List[Any] = getattr(_lowerCamelCase , "attention" ) A : Dict = getattr(_lowerCamelCase , "output" ) A : List[str] = getattr(_lowerCamelCase , "dense" ) elif m_name == "_output_dense": # output dense trace.extend(["output", "dense"] ) A : int = getattr(_lowerCamelCase , "output" ) A : Dict = getattr(_lowerCamelCase , "dense" ) elif m_name == "_output_layer_norm": # output dense trace.extend(["output", "LayerNorm"] ) A : Optional[int] = getattr(_lowerCamelCase , "output" ) A : Optional[int] = getattr(_lowerCamelCase , "LayerNorm" ) elif m_name == "_key_dense": # attention key trace.append("key" ) A : Optional[Any] = getattr(_lowerCamelCase , "key" ) elif m_name == "_query_dense": # attention query trace.append("query" ) A : List[Any] = getattr(_lowerCamelCase , "query" ) elif m_name == "_value_dense": # attention value trace.append("value" ) A : Optional[Any] = getattr(_lowerCamelCase , "value" ) elif m_name == "_intermediate_dense": # attention intermediate dense trace.extend(["intermediate", "dense"] ) A : Any = getattr(_lowerCamelCase , "intermediate" ) A : Tuple = getattr(_lowerCamelCase , "dense" ) elif m_name 
== "_output_layer_norm": # output layer norm trace.append("output" ) A : List[Any] = getattr(_lowerCamelCase , "output" ) # weights & biases elif m_name in ["bias", "beta"]: trace.append("bias" ) A : Optional[int] = getattr(_lowerCamelCase , "bias" ) elif m_name in ["kernel", "gamma"]: trace.append("weight" ) A : str = getattr(_lowerCamelCase , "weight" ) else: logger.warning(f"""Ignored {m_name}""" ) # for certain layers reshape is necessary A : List[Any] = ".".join(_lowerCamelCase ) if re.match(R"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)" , _lowerCamelCase ) or re.match( R"(\S+)\.attention\.output\.dense\.weight" , _lowerCamelCase ): A : Any = array.reshape(pointer.data.shape ) if "kernel" in full_name: A : Tuple = array.transpose() if pointer.shape == array.shape: A : Union[str, Any] = torch.from_numpy(_lowerCamelCase ) else: raise ValueError( f"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:""" f""" {array.shape}""" ) logger.info(f"""Successfully set variable {full_name} to PyTorch layer {trace}""" ) return model def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): # Instantiate model logger.info(f"""Loading model based on config from {config_path}...""" ) A : List[str] = BertConfig.from_json_file(_lowerCamelCase ) A : Optional[Any] = BertModel(_lowerCamelCase ) # Load weights from checkpoint logger.info(f"""Loading weights from checkpoint {tf_checkpoint_path}...""" ) load_tfa_weights_in_bert(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Save pytorch-model logger.info(f"""Saving PyTorch model to {pytorch_dump_path}...""" ) torch.save(model.state_dict() , _lowerCamelCase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument( """--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path.""" ) parser.add_argument( """--bert_config_file""", type=str, required=True, help="""The config json file corresponding to the BERT model. This specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", type=str, required=True, help="""Path to the output PyTorch model (must include filename).""", ) __SCREAMING_SNAKE_CASE = parser.parse_args() convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
17
from sympy import diff, lambdify, symbols from sympy.functions import * # noqa: F403 def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = "x" , _lowerCamelCase = 10**-10 , _lowerCamelCase = 1 , ): A : str = symbols(_lowerCamelCase ) A : int = lambdify(_lowerCamelCase , _lowerCamelCase ) A : List[str] = lambdify(_lowerCamelCase , diff(_lowerCamelCase , _lowerCamelCase ) ) A : Optional[int] = starting_point while True: if diff_function(_lowerCamelCase ) != 0: A : Optional[Any] = prev_guess - multiplicity * func(_lowerCamelCase ) / diff_function( _lowerCamelCase ) else: raise ZeroDivisionError("Could not find root" ) from None # Precision is checked by comparing the difference of consecutive guesses if abs(next_guess - prev_guess ) < precision: return next_guess A : int = next_guess # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""") # Find root of polynomial # Find fourth Root of 5 print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""") # Find value of e print( """The root of log(y) - 1 = 0 is """, F"""{newton_raphson('log(y) - 1', 2, variable='y')}""", ) # Exponential Roots print( """The root of exp(x) - 1 = 0 is""", F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""", ) # Find root of cos(x) print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
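A quick self-contained check of the same Newton-Raphson update on f(x) = x**2 - 4, built with the same sympy helpers the implementation above uses:

from sympy import diff, lambdify, symbols

# Self-contained check of the Newton-Raphson iteration on f(x) = x**2 - 4,
# whose positive root is 2.
x = symbols("x")
f = lambdify(x, x**2 - 4)
f_prime = lambdify(x, diff(x**2 - 4, x))
guess = 3.0
for _ in range(100):
    next_guess = guess - f(guess) / f_prime(guess)
    if abs(next_guess - guess) < 1e-10:
        break
    guess = next_guess
print(next_guess)  # converges to 2.0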
17
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __SCREAMING_SNAKE_CASE = { """configuration_wav2vec2""": ["""WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Wav2Vec2Config"""], """feature_extraction_wav2vec2""": ["""Wav2Vec2FeatureExtractor"""], """processing_wav2vec2""": ["""Wav2Vec2Processor"""], """tokenization_wav2vec2""": ["""Wav2Vec2CTCTokenizer""", """Wav2Vec2Tokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""", """Wav2Vec2ForAudioFrameClassification""", """Wav2Vec2ForCTC""", """Wav2Vec2ForMaskedLM""", """Wav2Vec2ForPreTraining""", """Wav2Vec2ForSequenceClassification""", """Wav2Vec2ForXVector""", """Wav2Vec2Model""", """Wav2Vec2PreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFWav2Vec2ForCTC""", """TFWav2Vec2Model""", """TFWav2Vec2PreTrainedModel""", """TFWav2Vec2ForSequenceClassification""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """FlaxWav2Vec2ForCTC""", """FlaxWav2Vec2ForPreTraining""", """FlaxWav2Vec2Model""", """FlaxWav2Vec2PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
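The try/except guards above reduce to a simple capability probe; a sketch of that pattern with importlib (the export name is illustrative):

import importlib.util

# The optional-dependency pattern above in miniature: probe for a backend and
# export nothing for it when the probe fails.
exports = []
if importlib.util.find_spec("torch") is not None:
    exports.append("Wav2Vec2Model")  # illustrative export name
print(exports)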
17
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} __SCREAMING_SNAKE_CASE = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } __SCREAMING_SNAKE_CASE = { """allenai/led-base-16384""": 16384, } class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = LEDTokenizer a__ = ["input_ids", "attention_mask"] def __init__( self : int , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : str="replace" , __lowerCamelCase : Optional[int]="<s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Dict="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : Union[str, Any]="<pad>" , __lowerCamelCase : Dict="<mask>" , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : str=True , **__lowerCamelCase : Union[str, Any] , ) -> Optional[int]: super().__init__( __lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase , **__lowerCamelCase , ) A : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space: A : Any = getattr(__lowerCamelCase , pre_tok_state.pop("type" ) ) A : Any = add_prefix_space A : Tuple = pre_tok_class(**__lowerCamelCase ) A : str = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` A : List[str] = "post_processor" A : Union[str, Any] = getattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) if tokenizer_component_instance: A : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A : Union[str, Any] = tuple(state["sep"] ) if "cls" in state: A : str = tuple(state["cls"] ) A : int = False if state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space: A : List[Any] = add_prefix_space A : Dict = True if state.get("trim_offsets" , __lowerCamelCase ) != trim_offsets: A : Dict = trim_offsets A : str = True if changes_to_apply: A : int = getattr(__lowerCamelCase , state.pop("type" ) ) A : Dict = component_class(**__lowerCamelCase ) setattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) @property # Copied from 
transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Any ) -> Dict: A : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else value A : Tuple = value def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[str] ) -> BatchEncoding: A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , *__lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ) -> BatchEncoding: A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]: A : Optional[Any] = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase ) return tuple(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any]=None ) -> List[str]: A : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: A : str = [self.sep_token_id] A : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ) -> dict: A : Dict = super()._pad( encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: A : List[Any] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: A : Optional[int] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
A : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase ) if needs_to_be_padded: A : Any = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` A : Tuple = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": A : Tuple = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
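The `global_attention_mask` padding rule at the end of `_pad` above, on a concrete example with right-side padding (`-1` marks local attention, matching the in-code comment):

# The right-padding rule for global_attention_mask, on a concrete example.
global_attention_mask = [1, 0, 0]
target_length = 5
difference = target_length - len(global_attention_mask)
print(global_attention_mask + [-1] * difference)  # [1, 0, 0, -1, -1]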
17
1
from __future__ import annotations __SCREAMING_SNAKE_CASE = list[list[int]] # assigning initial values to the grid __SCREAMING_SNAKE_CASE = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution __SCREAMING_SNAKE_CASE = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def UpperCAmelCase ( _lowerCamelCase ): for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def UpperCAmelCase ( _lowerCamelCase ): if location := find_empty_location(_lowerCamelCase ): A , A : Tuple = location else: # If the location is ``None``, then the grid is solved. return grid for digit in range(1 , 10 ): if is_safe(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): A : List[Any] = digit if sudoku(_lowerCamelCase ) is not None: return grid A : List[str] = 0 return None def UpperCAmelCase ( _lowerCamelCase ): for row in grid: for cell in row: print(_lowerCamelCase , end=" " ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print("""\nExample grid:\n""" + """=""" * 20) print_solution(example_grid) print("""\nExample grid solution:""") __SCREAMING_SNAKE_CASE = sudoku(example_grid) if solution is not None: print_solution(solution) else: print("""Cannot find a solution.""")
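The safety check above, applied by hand at row 0, column 1 of `initial_grid`: digit 2 clashes with the 2 already in column 1, while digit 1 has no conflict in the row, the column, or the top-left 3x3 box.

# The is_safe rule worked out on initial_grid at row 0, column 1.
row0 = [3, 0, 6, 5, 0, 8, 4, 0, 0]
col1 = [0, 2, 8, 0, 0, 5, 3, 0, 0]
box  = [3, 0, 6, 5, 2, 0, 0, 8, 7]  # top-left 3x3 block, flattened
for digit in (1, 2):
    safe = digit not in row0 and digit not in col1 and digit not in box
    print(digit, safe)  # 1 True, 2 False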
17
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Sequence, Value from .base import TaskTemplate @dataclass(frozen=_A ) class lowerCamelCase_ ( _A ): '''simple docstring''' # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization a__ = field(default="question-answering-extractive" ,metadata={"include_in_asdict_even_if_is_default": True} ) a__ = Features({"question": Value("string" ), "context": Value("string" )} ) a__ = Features( { "answers": Sequence( { "text": Value("string" ), "answer_start": Value("int32" ), } ) } ) a__ = "question" a__ = "context" a__ = "answers" @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict[str, str]: return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
17
1
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class lowerCamelCase_ : '''simple docstring''' def __init__( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Tuple=13 , __lowerCamelCase : List[str]=30 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : Any=3 , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : int=32 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Any=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=2 , ) -> str: A : List[Any] = parent A : Optional[int] = batch_size A : Any = image_size A : Optional[Any] = patch_size A : Optional[Any] = num_channels A : Tuple = is_training A : Optional[Any] = use_labels A : Union[str, Any] = hidden_size A : Tuple = num_hidden_layers A : Union[str, Any] = num_attention_heads A : Union[str, Any] = intermediate_size A : Any = hidden_act A : Tuple = hidden_dropout_prob A : Dict = attention_probs_dropout_prob A : Any = type_sequence_label_size A : Tuple = initializer_range A : List[Any] = scope A : Optional[int] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) A : List[str] = (image_size // patch_size) ** 2 A : List[str] = num_patches + 2 def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int: A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : List[Any] = None if self.use_labels: A : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Dict = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str] ) -> int: A : Optional[int] 
= DeiTModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : List[Any] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Any ) -> Any: A : List[Any] = DeiTForMaskedImageModeling(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A : List[str] = 1 A : Optional[int] = DeiTForMaskedImageModeling(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : int ) -> Dict: A : str = self.type_sequence_label_size A : List[str] = DeiTForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A : Any = 1 A : str = DeiTForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: A : Dict = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ) : Tuple = config_and_inputs A : int = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) a__ = ( { "feature-extraction": DeiTModel, "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]: A : str = DeiTModelTester(self ) A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict: self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int: pass def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: A , A : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Dict = model_class(__lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]: A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Union[str, Any] 
= model_class(__lowerCamelCase ) A : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Any = [*signature.parameters.keys()] A : Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any: A : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any]=False ) -> str: A : Union[str, Any] = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: if not self.model_tester.is_training: return A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() A : Dict = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(__lowerCamelCase ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue A : Union[str, Any] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Dict = model(**__lowerCamelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return A : Tuple = False A : Any = True for model_class in self.all_model_classes: if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue A : List[str] = model_class(__lowerCamelCase ) model.gradient_checkpointing_enable() model.to(__lowerCamelCase ) model.train() A : Dict = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Tuple = model(**__lowerCamelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() A : int = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(__lowerCamelCase ), *get_values(__lowerCamelCase ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ): A : Tuple = problem_type["title"] A : Optional[Any] = 
problem_type["num_labels"] A : List[str] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if problem_type["num_labels"] > 1: A : List[str] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) A : int = inputs["labels"].to(problem_type["dtype"] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=__lowerCamelCase ) as warning_list: A : Optional[Any] = model(**__lowerCamelCase ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Optional[Any] = DeiTModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]: return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]: A : Optional[int] = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to( __lowerCamelCase ) A : List[Any] = self.default_image_processor A : List[Any] = prepare_img() A : Optional[Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): A : List[str] = model(**__lowerCamelCase ) # verify the logits A : str = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : List[str] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: A : str = DeiTModel.from_pretrained( "facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" ) A : Dict = self.default_image_processor A : Optional[int] = prepare_img() A : Union[str, Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ) A : Union[str, Any] = inputs.pixel_values.to(__lowerCamelCase ) # forward pass to make sure inference works in fp16 with torch.no_grad(): A : List[str] = model(__lowerCamelCase )
17
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class lowerCamelCase_ : '''simple docstring''' def __init__( self : int , __lowerCamelCase : Any , __lowerCamelCase : Dict=3 , __lowerCamelCase : Dict=32 , __lowerCamelCase : Any=3 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : str=[8, 16, 32, 64] , __lowerCamelCase : Dict=[1, 1, 2, 1] , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : str=3 , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase : Tuple=[2, 3, 4] , __lowerCamelCase : Any=1 , ) -> int: A : Optional[int] = parent A : List[str] = batch_size A : Tuple = image_size A : List[str] = num_channels A : List[str] = embeddings_size A : List[str] = hidden_sizes A : str = depths A : Optional[Any] = is_training A : int = use_labels A : Optional[int] = hidden_act A : List[Any] = num_labels A : List[str] = scope A : str = len(__lowerCamelCase ) A : Optional[int] = out_features A : str = out_indices A : Optional[int] = num_groups def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: A : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Optional[int] = None if self.use_labels: A : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels ) A : Tuple = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ) -> Optional[int]: A : Any = BitModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : List[Any] = model(__lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : Dict ) -> Tuple: A : Union[str, Any] = self.num_labels A : List[str] = BitForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : str = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ) -> 
List[Any]: A : Dict = BitBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Optional[Any] = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None A : Optional[Any] = None A : Optional[int] = BitBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() A : Any = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Dict: A : List[str] = self.prepare_config_and_inputs() A , A , A : Tuple = config_and_inputs A : Tuple = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () a__ = ( {"feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: A : Any = BitModelTester(self ) A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: return @unittest.skip(reason="Bit does not output attentions" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]: pass @unittest.skip(reason="Bit does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]: pass @unittest.skip(reason="Bit does not support input and output embeddings" ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]: pass def SCREAMING_SNAKE_CASE__ ( self : int ) -> str: A , A : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Dict = model_class(__lowerCamelCase ) A : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Optional[Any] = [*signature.parameters.keys()] A : List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]: A : int = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]: A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Optional[int] = model_class(config=__lowerCamelCase ) for name, module in model.named_modules(): if isinstance(__lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: def check_hidden_states_output(__lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] ): A : Dict = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): A : List[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) A : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A : List[Any] = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() A : Dict = ["preactivation", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: A : Dict = layer_type A : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) @unittest.skip(reason="Bit does not use feedforward chunking" ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: pass def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]: A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]: for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Optional[Any] = BitModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]: return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: A : Union[str, Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCamelCase ) A : List[Any] = self.default_image_processor A : List[Any] = prepare_img() A : Tuple = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): A : Union[str, Any] = model(**__lowerCamelCase ) # verify the logits A : str = torch.Size((1, 
10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : Optional[Any] = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) @require_torch class lowerCamelCase_ ( _A ,unittest.TestCase ): '''simple docstring''' a__ = (BitBackbone,) if is_torch_available() else () a__ = BitConfig a__ = False def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: A : Union[str, Any] = BitModelTester(self )
17
1
import warnings from ...utils import logging from .image_processing_poolformer import PoolFormerImageProcessor __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class lowerCamelCase_ ( _A ): '''simple docstring''' def __init__( self : List[str] , *__lowerCamelCase : Tuple , **__lowerCamelCase : List[str] ) -> None: warnings.warn( "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use PoolFormerImageProcessor instead." , __lowerCamelCase , ) super().__init__(*__lowerCamelCase , **__lowerCamelCase )
17
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: A : Union[str, Any] = XLMRobertaModel.from_pretrained("xlm-roberta-base" ) A : Tuple = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] ) # The dog is cute and lives in the garden house A : Tuple = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim A : List[str] = torch.tensor( [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): A : Tuple = model(__lowerCamelCase )["last_hidden_state"].detach() self.assertEqual(output.shape , __lowerCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCamelCase , atol=1e-3 ) ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int: A : str = XLMRobertaModel.from_pretrained("xlm-roberta-large" ) A : List[Any] = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] ) # The dog is cute and lives in the garden house A : Optional[Any] = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim A : List[Any] = torch.tensor( [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): A : Optional[int] = model(__lowerCamelCase )["last_hidden_state"].detach() self.assertEqual(output.shape , __lowerCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCamelCase , atol=1e-3 ) )
17
1
from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class lowerCamelCase_ : '''simple docstring''' a__ = 42 a__ = 42 class lowerCamelCase_ : '''simple docstring''' def __init__( self : Optional[int] , __lowerCamelCase : int ) -> Tuple: A : list[list[Edge]] = [[] for _ in range(__lowerCamelCase )] A : int = size def __getitem__( self : Optional[int] , __lowerCamelCase : int ) -> Iterator[Edge]: return iter(self._graph[vertex] ) @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple: return self._size def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ) -> List[str]: if weight not in (0, 1): raise ValueError("Edge weight must be either 0 or 1." ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError("Vertex indexes must be in [0; size)." ) self._graph[from_vertex].append(Edge(__lowerCamelCase , __lowerCamelCase ) ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : int ) -> int | None: A : List[Any] = deque([start_vertex] ) A : list[int | None] = [None] * self.size A : str = 0 while queue: A : Union[str, Any] = queue.popleft() A : int = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: A : Tuple = current_distance + edge.weight A : str = distances[edge.destination_vertex] if ( isinstance(__lowerCamelCase , __lowerCamelCase ) and new_distance >= dest_vertex_distance ): continue A : Tuple = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError("No path from start_vertex to finish_vertex." ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
17
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase_ : '''simple docstring''' def __init__( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Optional[int]=10 , __lowerCamelCase : Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase : Tuple=[1, 1, 2, 1] , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , ) -> str: A : Optional[Any] = parent A : Optional[int] = batch_size A : List[str] = image_size A : List[str] = num_channels A : Tuple = embeddings_size A : Optional[int] = hidden_sizes A : Dict = depths A : Optional[int] = is_training A : List[str] = use_labels A : List[Any] = hidden_act A : Optional[int] = num_labels A : int = scope A : List[Any] = len(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[Any]: A : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Optional[Any] = None if self.use_labels: A : Any = ids_tensor([self.batch_size] , self.num_labels ) A : List[Any] = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Tuple: A : List[str] = TFRegNetModel(config=__lowerCamelCase ) A : str = model(__lowerCamelCase , training=__lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Tuple ) -> List[str]: A : List[Any] = self.num_labels A : int = TFRegNetForImageClassification(__lowerCamelCase ) A : str = model(__lowerCamelCase , labels=__lowerCamelCase , training=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict: A : Any = self.prepare_config_and_inputs() A , A , A : str = config_and_inputs A : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ): '''simple docstring''' a__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () a__ = ( {"feature-extraction": TFRegNetModel, 
"image-classification": TFRegNetForImageClassification} if is_tf_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: A : Optional[Any] = TFRegNetModelTester(self ) A : int = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Tuple: return @unittest.skip(reason="RegNet does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) @slow def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any: super().test_keras_fit() @unittest.skip(reason="RegNet does not support input and output embeddings" ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Union[str, Any]: pass def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple: A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Union[str, Any] = model_class(__lowerCamelCase ) A : int = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Union[str, Any] = [*signature.parameters.keys()] A : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple: A : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: def check_hidden_states_output(__lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ): A : int = model_class(__lowerCamelCase ) A : int = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) , training=__lowerCamelCase ) A : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A : Dict = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) A , A : int = self.model_tester.prepare_config_and_inputs_for_common() A : str = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: A : List[str] = layer_type A : List[Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict: A , A : Tuple = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(__lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]={} ): A : Optional[int] = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ) A : int = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ).to_tuple() def recursive_check(__lowerCamelCase : List[str] , __lowerCamelCase : Any ): if isinstance(__lowerCamelCase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in 
zip(__lowerCamelCase , __lowerCamelCase ): recursive_check(__lowerCamelCase , __lowerCamelCase ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(__lowerCamelCase , __lowerCamelCase ) ) , msg=( "Tuple and dict output are not equal. Difference:" F""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ) , ) recursive_check(__lowerCamelCase , __lowerCamelCase ) for model_class in self.all_model_classes: A : Tuple = model_class(__lowerCamelCase ) A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} ) A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: A : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]: for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Union[str, Any] = TFRegNetModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase ( ): A : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]: A : List[Any] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) A : Optional[int] = self.default_image_processor A : List[Any] = prepare_img() A : str = image_processor(images=__lowerCamelCase , return_tensors="tf" ) # forward pass A : List[Any] = model(**__lowerCamelCase , training=__lowerCamelCase ) # verify the logits A : Dict = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) A : Optional[Any] = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 )
17
1
__SCREAMING_SNAKE_CASE = { 0: """0""", 1: """1""", 2: """2""", 3: """3""", 4: """4""", 5: """5""", 6: """6""", 7: """7""", 8: """8""", 9: """9""", 10: """a""", 11: """b""", 12: """c""", 13: """d""", 14: """e""", 15: """f""", } def UpperCAmelCase ( _lowerCamelCase ): assert type(_lowerCamelCase ) in (int, float) and decimal == int(_lowerCamelCase ) A : Optional[int] = int(_lowerCamelCase ) A : Dict = "" A : Dict = False if decimal < 0: A : List[str] = True decimal *= -1 while decimal > 0: A , A : Optional[Any] = divmod(_lowerCamelCase , 16 ) A : Any = values[remainder] + hexadecimal A : Tuple = "0x" + hexadecimal if negative: A : Optional[Any] = "-" + hexadecimal return hexadecimal if __name__ == "__main__": import doctest doctest.testmod()
17
import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = (PNDMScheduler,) a__ = (("num_inference_steps", 50),) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , **__lowerCamelCase : str ) -> Optional[Any]: A : Union[str, Any] = { "num_train_timesteps": 10_00, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**__lowerCamelCase ) return config def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : List[str]=0 , **__lowerCamelCase : Any ) -> Tuple: A : Dict = dict(self.forward_default_kwargs ) A : Dict = kwargs.pop("num_inference_steps" , __lowerCamelCase ) A : Union[str, Any] = self.dummy_sample A : List[Any] = 0.1 * sample A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A : Any = self.get_scheduler_config(**__lowerCamelCase ) A : int = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals A : Tuple = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCamelCase ) A : Dict = scheduler_class.from_pretrained(__lowerCamelCase ) new_scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals A : Tuple = dummy_past_residuals[:] A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : str = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A : int = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[str] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]: pass def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Optional[Any]=0 , **__lowerCamelCase : Tuple ) -> str: A : List[str] = dict(self.forward_default_kwargs ) A : Optional[int] = kwargs.pop("num_inference_steps" , __lowerCamelCase ) A : List[str] = self.dummy_sample A : Any = 0.1 * sample A : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A : Tuple = self.get_scheduler_config() A : Optional[int] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residuals (must be after setting timesteps) A : Optional[int] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCamelCase ) A : str = scheduler_class.from_pretrained(__lowerCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__lowerCamelCase ) # copy over dummy past residual (must be after setting timesteps) A : Optional[Any] = dummy_past_residuals[:] A : Union[str, Any] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : Dict = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A : 
Union[str, Any] = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[Any] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : Tuple , **__lowerCamelCase : Any ) -> Union[str, Any]: A : Optional[Any] = self.scheduler_classes[0] A : List[Any] = self.get_scheduler_config(**__lowerCamelCase ) A : str = scheduler_class(**__lowerCamelCase ) A : List[str] = 10 A : Union[str, Any] = self.dummy_model() A : int = self.dummy_sample_deter scheduler.set_timesteps(__lowerCamelCase ) for i, t in enumerate(scheduler.prk_timesteps ): A : Optional[int] = model(__lowerCamelCase , __lowerCamelCase ) A : Optional[int] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): A : Tuple = model(__lowerCamelCase , __lowerCamelCase ) A : Tuple = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample return sample def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any: A : Union[str, Any] = dict(self.forward_default_kwargs ) A : Union[str, Any] = kwargs.pop("num_inference_steps" , __lowerCamelCase ) for scheduler_class in self.scheduler_classes: A : Dict = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) A : List[Any] = self.dummy_sample A : List[Any] = 0.1 * sample if num_inference_steps is not None and hasattr(__lowerCamelCase , "set_timesteps" ): scheduler.set_timesteps(__lowerCamelCase ) elif num_inference_steps is not None and not hasattr(__lowerCamelCase , "set_timesteps" ): A : List[str] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] A : Tuple = dummy_past_residuals[:] A : Dict = scheduler.step_prk(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : List[Any] = scheduler.step_prk(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) A : Any = scheduler.step_plms(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample A : str = scheduler.step_plms(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple: for timesteps in [1_00, 10_00]: self.check_over_configs(num_train_timesteps=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]: for steps_offset in [0, 1]: self.check_over_configs(steps_offset=__lowerCamelCase ) A : Dict = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config(steps_offset=1 ) A : Optional[int] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict: for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=__lowerCamelCase , beta_end=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : 
Optional[int] ) -> Union[str, Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]: for t in [1, 5, 10]: self.check_over_forward(time_step=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int: for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ): self.check_over_forward(num_inference_steps=__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any: # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 A : str = 27 for scheduler_class in self.scheduler_classes: A : Tuple = self.dummy_sample A : List[Any] = 0.1 * sample A : List[Any] = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(__lowerCamelCase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : str ) -> int: with self.assertRaises(__lowerCamelCase ): A : Union[str, Any] = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config() A : List[str] = scheduler_class(**__lowerCamelCase ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict: A : Optional[Any] = self.full_loop() A : Tuple = torch.sum(torch.abs(__lowerCamelCase ) ) A : Optional[int] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 198.1318 ) < 1e-2 assert abs(result_mean.item() - 0.2580 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any: A : Any = self.full_loop(prediction_type="v_prediction" ) A : Union[str, Any] = torch.sum(torch.abs(__lowerCamelCase ) ) A : List[str] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 67.3986 ) < 1e-2 assert abs(result_mean.item() - 0.0878 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]: # We specify different beta, so that the first alpha is 0.99 A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 ) A : Dict = torch.sum(torch.abs(__lowerCamelCase ) ) A : Any = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 230.0399 ) < 1e-2 assert abs(result_mean.item() - 0.2995 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any: # We specify different beta, so that the first alpha is 0.99 A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 ) A : List[Any] = torch.sum(torch.abs(__lowerCamelCase ) ) A : Optional[Any] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 186.9482 ) < 1e-2 assert abs(result_mean.item() - 0.2434 ) < 1e-3
17
1
from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # TODO Update this __SCREAMING_SNAKE_CASE = { """facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""", # See all ESM models at https://huggingface.co/models?filter=esm } class lowerCamelCase_ ( _A ): '''simple docstring''' a__ = "esm" def __init__( self : Optional[Any] , __lowerCamelCase : str=None , __lowerCamelCase : Any=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Any=7_68 , __lowerCamelCase : Tuple=12 , __lowerCamelCase : int=12 , __lowerCamelCase : Any=30_72 , __lowerCamelCase : str=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : List[str]=10_26 , __lowerCamelCase : List[Any]=0.02 , __lowerCamelCase : Union[str, Any]=1e-12 , __lowerCamelCase : Optional[Any]="absolute" , __lowerCamelCase : str=True , __lowerCamelCase : List[str]=None , __lowerCamelCase : List[str]=False , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : int=None , **__lowerCamelCase : List[str] , ) -> Any: super().__init__(pad_token_id=__lowerCamelCase , mask_token_id=__lowerCamelCase , **__lowerCamelCase ) A : str = vocab_size A : Optional[Any] = hidden_size A : Tuple = num_hidden_layers A : Any = num_attention_heads A : Tuple = intermediate_size A : List[Any] = hidden_dropout_prob A : List[str] = attention_probs_dropout_prob A : List[Any] = max_position_embeddings A : Any = initializer_range A : Tuple = layer_norm_eps A : Tuple = position_embedding_type A : Dict = use_cache A : Optional[Any] = emb_layer_norm_before A : Tuple = token_dropout A : Optional[int] = is_folding_model if is_folding_model: if esmfold_config is None: logger.info("No esmfold_config supplied for folding model, using default values." ) A : Dict = EsmFoldConfig() elif isinstance(__lowerCamelCase , __lowerCamelCase ): A : List[str] = EsmFoldConfig(**__lowerCamelCase ) A : str = esmfold_config if vocab_list is None: logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" ) A : str = get_default_vocab_list() else: A : List[str] = vocab_list else: A : Tuple = None A : Union[str, Any] = None if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , __lowerCamelCase ): raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" 
) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str: A : Tuple = super().to_dict() if isinstance(self.esmfold_config , __lowerCamelCase ): A : int = self.esmfold_config.to_dict() return output @dataclass class lowerCamelCase_ : '''simple docstring''' a__ = None a__ = True a__ = False a__ = False a__ = False a__ = 0 a__ = True a__ = False a__ = 128 a__ = None def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[Any]: if self.trunk is None: A : Dict = TrunkConfig() elif isinstance(self.trunk , __lowerCamelCase ): A : str = TrunkConfig(**self.trunk ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> str: A : int = asdict(self ) A : Dict = self.trunk.to_dict() return output @dataclass class lowerCamelCase_ : '''simple docstring''' a__ = 48 a__ = 1024 a__ = 128 a__ = 32 a__ = 32 a__ = 32 a__ = 0 a__ = 0 a__ = False a__ = 4 a__ = 128 a__ = None def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]: if self.structure_module is None: A : Optional[int] = StructureModuleConfig() elif isinstance(self.structure_module , __lowerCamelCase ): A : Optional[Any] = StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got" F""" {self.sequence_state_dim} and {self.sequence_state_dim}.""" ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got" F""" {self.pairwise_state_dim} and {self.pairwise_state_dim}.""" ) A : List[str] = self.sequence_state_dim // self.sequence_head_width A : Optional[Any] = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got" F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got" F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" ) if self.pairwise_state_dim % 2 != 0: raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" ) if self.dropout >= 0.4: raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""" ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int: A : Any = asdict(self ) A : int = self.structure_module.to_dict() return output @dataclass class lowerCamelCase_ : '''simple docstring''' a__ = 384 a__ = 128 a__ = 16 a__ = 128 a__ = 12 a__ = 4 a__ = 8 a__ = 0.1 a__ = 8 a__ = 1 a__ = 2 a__ = 7 a__ = 10 a__ = 1E-8 a__ = 1E5 def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str: return asdict(self ) def UpperCAmelCase ( ): return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
17
from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function __SCREAMING_SNAKE_CASE = 1.0_5_4_5_7_1_8_1_7e-3_4 # unit of ℏ : J * s __SCREAMING_SNAKE_CASE = 3e8 # unit of c : m * s^-1 def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if (force, area, distance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if force < 0: raise ValueError("Magnitude of force can not be negative" ) if distance < 0: raise ValueError("Distance can not be negative" ) if area < 0: raise ValueError("Area can not be negative" ) if force == 0: A : int = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 240 * (distance) ** 4 ) return {"force": force} elif area == 0: A : Tuple = (240 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: A : Dict = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError("One and only one argument must be 0" ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
17
1
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 __SCREAMING_SNAKE_CASE = data_utils.TransfoXLTokenizer __SCREAMING_SNAKE_CASE = data_utils.TransfoXLCorpus __SCREAMING_SNAKE_CASE = data_utils __SCREAMING_SNAKE_CASE = data_utils def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(_lowerCamelCase , "rb" ) as fp: A : Dict = pickle.load(_lowerCamelCase , encoding="latin1" ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) A : int = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"] print(f"""Save vocabulary to {pytorch_vocab_dump_path}""" ) A : Dict = corpus.vocab.__dict__ torch.save(_lowerCamelCase , _lowerCamelCase ) A : Optional[int] = corpus.__dict__ corpus_dict_no_vocab.pop("vocab" , _lowerCamelCase ) A : Optional[int] = pytorch_dump_folder_path + "/" + CORPUS_NAME print(f"""Save dataset to {pytorch_dataset_dump_path}""" ) torch.save(_lowerCamelCase , _lowerCamelCase ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model A : str = os.path.abspath(_lowerCamelCase ) A : Optional[Any] = os.path.abspath(_lowerCamelCase ) print(f"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" ) # Initialise PyTorch model if transfo_xl_config_file == "": A : int = TransfoXLConfig() else: A : Dict = TransfoXLConfig.from_json_file(_lowerCamelCase ) print(f"""Building PyTorch model from configuration: {config}""" ) A : int = TransfoXLLMHeadModel(_lowerCamelCase ) A : str = load_tf_weights_in_transfo_xl(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Save pytorch-model A : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase ) A : Tuple = os.path.join(_lowerCamelCase , _lowerCamelCase ) print(f"""Save PyTorch model to {os.path.abspath(_lowerCamelCase )}""" ) torch.save(model.state_dict() , _lowerCamelCase ) print(f"""Save configuration file to {os.path.abspath(_lowerCamelCase )}""" ) with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the folder to store the PyTorch model or dataset/vocab.""", ) parser.add_argument( """--tf_checkpoint_path""", default="""""", type=str, help="""An optional path to a TensorFlow checkpoint path to be converted.""", ) parser.add_argument( """--transfo_xl_config_file""", default="""""", type=str, help=( """An optional config json file corresponding to the pre-trained BERT model. 
\n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--transfo_xl_dataset_file""", default="""""", type=str, help="""An optional dataset file to be converted in a vocabulary.""", ) __SCREAMING_SNAKE_CASE = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
17
import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ): A : Optional[Any] = RobertaPreLayerNormConfig.from_pretrained( _lowerCamelCase , architectures=["RobertaPreLayerNormForMaskedLM"] ) # convert state_dict A : List[Any] = torch.load(hf_hub_download(repo_id=_lowerCamelCase , filename="pytorch_model.bin" ) ) A : Union[str, Any] = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwriting 'roberta' if tensor_key.startswith("roberta." ): A : int = "roberta_prelayernorm." + tensor_key[len("roberta." ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ): continue A : Any = tensor_value A : Optional[int] = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=_lowerCamelCase , config=_lowerCamelCase , state_dict=_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) # convert tokenizer A : Optional[Any] = AutoTokenizer.from_pretrained(_lowerCamelCase ) tokenizer.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint-repo""", default=None, type=str, required=True, help="""Path to the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) __SCREAMING_SNAKE_CASE = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
17
1
from scipy.stats import pearsonr import datasets __SCREAMING_SNAKE_CASE = "\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n" __SCREAMING_SNAKE_CASE = "\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results['pearsonr'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n ['p-value', 'pearsonr']\n >>> print(round(results['pearsonr'], 2))\n -0.74\n >>> print(round(results['p-value'], 2))\n 0.15\n" __SCREAMING_SNAKE_CASE = "\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. 
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("float" ), "references": datasets.Value("float" ), } ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] , ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int]=False ) -> Tuple: if return_pvalue: A : Optional[Any] = pearsonr(A__ , A__ ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(A__ , A__ )[0] )}
700
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __SCREAMING_SNAKE_CASE = { """configuration_instructblip""": [ """INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """InstructBlipConfig""", """InstructBlipQFormerConfig""", """InstructBlipVisionConfig""", ], """processing_instructblip""": ["""InstructBlipProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ """INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """InstructBlipQFormerModel""", """InstructBlipPreTrainedModel""", """InstructBlipForConditionalGeneration""", """InstructBlipVisionModel""", ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys __SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
17
0