Dataset schema:
    code: string, length 81 to 54k
    code_codestyle: int64, 0 to 721
    style_context: string, length 91 to 41.9k
    style_context_codestyle: int64, 0 to 699
    label: int64, 0 to 1
from random import shuffle

import tensorflow as tf
from numpy import array


def tf_k_means_cluster(vectors, noofclusters):
    """K-Means clustering in TensorFlow 1.x graph mode.

    `vectors` is an n*k 2-D array (n vectors of dimensionality k);
    `noofclusters` is the number of clusters to form.
    """
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ## CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ## First lets ensure we have a Variable vector for each centroid,
        ## initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ## These nodes will assign the centroid Variables the appropriate
        ## values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ## Variables for cluster assignments of individual vectors
        ## (initialized to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ## These nodes will assign an assignment Variable the appropriate
        ## value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ## Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ## Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        # tf.sub was renamed to tf.subtract in TF 1.0
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))

        ## This node will figure out which cluster to assign a vector to,
        ## based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ## INITIALIZING STATE VARIABLES
        ## This will help initialization of all Variables defined with respect
        ## to the graph. The Variable-initializer should be defined after
        ## all the Variables have been constructed, so that each of them
        ## will be included in the initialization.
        # tf.initialize_all_variables was deprecated in favor of
        # tf.global_variables_initializer
        init_op = tf.global_variables_initializer()

        # Initialize all variables
        sess.run(init_op)

        ## CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ## EXPECTATION STEP
            ## Based on the centroid locations till last iteration, compute
            ## the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                # 'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances}
                )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment}
                )

            ## MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)}
                )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location}
                )

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
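A minimal usage sketch, not part of the original file: the data and call below are illustrative, assuming a TensorFlow 1.x runtime (or tf.compat.v1 behavior) where tf.Session and tf.placeholder exist.

import numpy as np

# Illustrative only: 60 two-dimensional points around three rough centers.
rng = np.random.RandomState(0)
points = np.concatenate(
    [rng.randn(20, 2) + offset for offset in ([0, 0], [5, 5], [0, 5])]
)

centroids, assignments = tf_k_means_cluster(points, 3)
print("centroids:", centroids)
print("first ten assignments:", assignments[:10])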
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
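An illustrative check of the SQuAD-style metric helpers above; this snippet is my addition, not part of the file:

print(normalize_answer("The Eiffel Tower!"))         # -> "eiffel tower"
print(f1_score("the cat sat", "a cat sat down"))     # token-overlap F1, ~0.8 here
print(calculate_exact_match(["Paris"], ["paris."]))  # -> {"em": 1.0}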
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
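A usage note rather than part of the file: the _LazyModule indirection means a backing module is only imported when one of its symbols is first touched, so lightweight config imports do not load the torch or TensorFlow modeling files. A sketch, assuming a standard transformers installation:

# Illustrative: resolving a lazily exported symbol.
from transformers import ConvNextConfig

config = ConvNextConfig()  # default ConvNeXt hyperparameters
print(config.model_type)   # "convnext"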
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed __magic_name__ = logging.getLogger(__name__) def lowerCamelCase ( lowerCamelCase : Tuple=2 , lowerCamelCase : Tuple=3 , lowerCamelCase : Tuple=16 , lowerCamelCase : int = 10 , lowerCamelCase : int = 2): def get_dataset(lowerCamelCase : Optional[int]): A_ : Tuple = torch.randn(batch_size * n_batches , 1) return TensorDataset(lowerCamelCase , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1)) A_ : str = get_dataset(lowerCamelCase) A_ : Optional[Any] = get_dataset(lowerCamelCase) A_ : List[Any] = DataLoader(lowerCamelCase , shuffle=lowerCamelCase , batch_size=lowerCamelCase , num_workers=4) A_ : Optional[Any] = DataLoader(lowerCamelCase , shuffle=lowerCamelCase , batch_size=lowerCamelCase , num_workers=4) return (train_dataloader, valid_dataloader) def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : Any , lowerCamelCase : Any , lowerCamelCase : str=None): A_ : List[str] = [] for epoch in range(lowerCamelCase): # Train quickly model.train() for batch in dataloader: A_ : int = batch A_ : Optional[int] = model(lowerCamelCase) A_ : str = torch.nn.functional.mse_loss(lowerCamelCase , lowerCamelCase) accelerator.backward(lowerCamelCase) optimizer.step() optimizer.zero_grad() rands.append(random.random()) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class __lowerCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self : Optional[int] ): '''simple docstring''' super().__init__() A_ : Optional[Any] = nn.Parameter(torch.randn(1 ) ) A_ : List[str] = nn.Parameter(torch.randn(1 ) ) def _a ( self : List[Any] ,_a : int ): '''simple docstring''' return x * self.a + self.b class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def _a ( self : Union[str, Any] ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) A_ : Optional[Any] = DummyModel() A_ : Optional[int] = torch.optim.Adam(params=model.parameters() ,lr=1e-3 ) A_ : int = dummy_dataloaders() A_ : int = ProjectConfiguration(total_limit=1 ,project_dir=_a ,automatic_checkpoint_naming=_a ) # Train baseline A_ : Optional[Any] = Accelerator(project_config=_a ) A_ : Union[str, Any] = accelerator.prepare( _a ,_a ,_a ,_a ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) ,1 ) def _a ( self : Union[str, Any] ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) A_ : Optional[Any] = DummyModel() A_ : Dict = torch.optim.Adam(params=model.parameters() ,lr=1e-3 ) A_ : Optional[Any] = dummy_dataloaders() # Train baseline A_ : List[Any] = Accelerator() A_ : Dict = accelerator.prepare( _a ,_a ,_a ,_a ) # Save initial A_ : Dict = os.path.join(_a ,"""initial""" ) accelerator.save_state(_a ) (A_) : Optional[Any] = model.a.item(), model.b.item() A_ : Tuple = optimizer.state_dict() A_ : List[str] = train(3 ,_a ,_a ,_a ,_a ) (A_) : Union[str, Any] = model.a.item(), model.b.item() A_ : Optional[int] = optimizer.state_dict() # Train partially set_seed(42 ) A_ : Optional[Any] = DummyModel() 
A_ : List[Any] = torch.optim.Adam(params=model.parameters() ,lr=1e-3 ) A_ : Tuple = dummy_dataloaders() A_ : Optional[Any] = Accelerator() A_ : Optional[int] = accelerator.prepare( _a ,_a ,_a ,_a ) accelerator.load_state(_a ) (A_) : Any = model.a.item(), model.b.item() A_ : str = optimizer.state_dict() self.assertEqual(_a ,_a ) self.assertEqual(_a ,_a ) self.assertEqual(_a ,_a ) A_ : Tuple = train(2 ,_a ,_a ,_a ,_a ) # Save everything A_ : Any = os.path.join(_a ,"""checkpoint""" ) accelerator.save_state(_a ) # Load everything back in and make sure all states work accelerator.load_state(_a ) test_rands += train(1 ,_a ,_a ,_a ,_a ) (A_) : List[str] = model.a.item(), model.b.item() A_ : Optional[Any] = optimizer.state_dict() self.assertEqual(_a ,_a ) self.assertEqual(_a ,_a ) self.assertEqual(_a ,_a ) self.assertEqual(_a ,_a ) def _a ( self : Dict ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) A_ : List[str] = DummyModel() A_ : List[str] = torch.optim.Adam(params=model.parameters() ,lr=1e-3 ) A_ : Dict = dummy_dataloaders() A_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=_a ) # Train baseline A_ : int = Accelerator(project_dir=_a ,project_config=_a ) A_ : Tuple = accelerator.prepare( _a ,_a ,_a ,_a ) # Save initial accelerator.save_state() (A_) : Tuple = model.a.item(), model.b.item() A_ : Union[str, Any] = optimizer.state_dict() A_ : Tuple = train(3 ,_a ,_a ,_a ,_a ) (A_) : Any = model.a.item(), model.b.item() A_ : List[Any] = optimizer.state_dict() # Train partially set_seed(42 ) A_ : Any = DummyModel() A_ : Tuple = torch.optim.Adam(params=model.parameters() ,lr=1e-3 ) A_ : Any = dummy_dataloaders() A_ : Union[str, Any] = ProjectConfiguration(iteration=1 ,automatic_checkpoint_naming=_a ) A_ : List[Any] = Accelerator(project_dir=_a ,project_config=_a ) A_ : Union[str, Any] = accelerator.prepare( _a ,_a ,_a ,_a ) accelerator.load_state(os.path.join(_a ,"""checkpoints""" ,"""checkpoint_0""" ) ) (A_) : Optional[int] = model.a.item(), model.b.item() A_ : List[str] = optimizer.state_dict() self.assertEqual(_a ,_a ) self.assertEqual(_a ,_a ) self.assertEqual(_a ,_a ) A_ : Optional[int] = train(2 ,_a ,_a ,_a ,_a ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(_a ,"""checkpoints""" ,"""checkpoint_1""" ) ) test_rands += train(1 ,_a ,_a ,_a ,_a ) (A_) : List[Any] = model.a.item(), model.b.item() A_ : Optional[int] = optimizer.state_dict() self.assertEqual(_a ,_a ) self.assertEqual(_a ,_a ) self.assertEqual(_a ,_a ) self.assertEqual(_a ,_a ) def _a ( self : int ): '''simple docstring''' A_ : Optional[int] = torch.tensor([1, 2, 3] ) A_ : str = torch.tensor([2, 3, 4] ) A_ : str = DummyModel() A_ : Tuple = torch.optim.Adam(net.parameters() ) A_ : List[str] = Accelerator() with self.assertRaises(_a ) as ve: accelerator.register_for_checkpointing(_a ,_a ,_a ,_a ) A_ : Union[str, Any] = str(ve.exception ) self.assertTrue("""Item at index 0""" in message ) self.assertTrue("""Item at index 1""" in message ) self.assertFalse("""Item at index 2""" in message ) self.assertFalse("""Item at index 3""" in message ) def _a ( self : Tuple ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) A_ : List[str] = DummyModel() A_ : Any = torch.optim.Adam(params=model.parameters() ,lr=1e-3 ) A_ : str = torch.optim.lr_scheduler.StepLR(_a ,step_size=1 ,gamma=0.99 ) A_ : List[str] = dummy_dataloaders() A_ : Union[str, Any] = 
ProjectConfiguration(automatic_checkpoint_naming=_a ) # Train baseline A_ : List[Any] = Accelerator(project_dir=_a ,project_config=_a ) A_ : str = accelerator.prepare( _a ,_a ,_a ,_a ,_a ) # Save initial accelerator.save_state() A_ : List[Any] = scheduler.state_dict() train(3 ,_a ,_a ,_a ,_a ,_a ) self.assertNotEqual(_a ,scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(_a ,"""checkpoints""" ,"""checkpoint_0""" ) ) self.assertEqual(_a ,scheduler.state_dict() ) def _a ( self : Union[str, Any] ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) A_ : Any = DummyModel() A_ : str = ProjectConfiguration(automatic_checkpoint_naming=_a ,total_limit=2 ) # Train baseline A_ : List[str] = Accelerator(project_dir=_a ,project_config=_a ) A_ : List[str] = accelerator.prepare(_a ) # Save 3 states: for _ in range(11 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(_a ,"""checkpoints""" ,"""checkpoint_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_a ,"""checkpoints""" ,"""checkpoint_9""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_a ,"""checkpoints""" ,"""checkpoint_10""" ) ) ) @require_cuda def _a ( self : Any ): '''simple docstring''' A_ : List[str] = ["""torchrun""", f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )] execute_subprocess_async(_a ,env=os.environ.copy() ) if __name__ == "__main__": __magic_name__ = '/tmp/accelerate/state_checkpointing' __magic_name__ = DummyModel() __magic_name__ = torch.optim.Adam(params=model.parameters(), lr=1e-3) __magic_name__ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9) __magic_name__ , __magic_name__ = dummy_dataloaders() __magic_name__ = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline __magic_name__ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no') if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) __magic_name__ , __magic_name__ = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in optimizer.param_groups: __magic_name__ = group['params'][0].device break assert param_device.type == accelerator.device.type __magic_name__ = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu') for group in optimizer.param_groups: __magic_name__ = group['params'][0].device break assert ( param_device.type == torch.device('cpu').type ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device') for group in optimizer.param_groups: __magic_name__ = group['params'][0].device break assert ( param_device.type == accelerator.device.type ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match='Unsupported optimizer map location 
passed'): accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid') accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
'''simple docstring''' import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = KandinskyVaaControlnetPipeline a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""] a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""] a_ = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] a_ = False @property def _a ( self : Any ): '''simple docstring''' return 32 @property def _a ( self : Tuple ): '''simple docstring''' return 32 @property def _a ( self : Tuple ): '''simple docstring''' return self.time_input_dim @property def _a ( self : str ): '''simple docstring''' return self.time_input_dim * 4 @property def _a ( self : Optional[Any] ): '''simple docstring''' return 100 @property def _a ( self : List[Any] ): '''simple docstring''' torch.manual_seed(0 ) A_ : List[Any] = { """in_channels""": 8, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image_hint""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } A_ : Tuple = UNetaDConditionModel(**_a ) return model @property def _a ( self : List[str] ): '''simple docstring''' return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def _a ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) A_ : int = VQModel(**self.dummy_movq_kwargs ) return model def _a ( self : List[str] ): '''simple docstring''' A_ : Optional[Any] = self.dummy_unet A_ : int = self.dummy_movq A_ : Tuple = DDIMScheduler( num_train_timesteps=1000 ,beta_schedule="""linear""" ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=_a ,set_alpha_to_one=_a ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=_a ,) A_ : int = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def _a ( self : Dict ,_a : str ,_a : Union[str, Any]=0 ): '''simple docstring''' A_ : Dict = floats_tensor((1, self.text_embedder_hidden_size) 
,rng=random.Random(_a ) ).to(_a ) A_ : int = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to( _a ) # create hint A_ : List[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_a ) ).to(_a ) if str(_a ).startswith("""mps""" ): A_ : Optional[Any] = torch.manual_seed(_a ) else: A_ : str = torch.Generator(device=_a ).manual_seed(_a ) A_ : List[Any] = { """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """hint""": hint, """generator""": generator, """height""": 64, """width""": 64, """guidance_scale""": 4.0, """num_inference_steps""": 2, """output_type""": """np""", } return inputs def _a ( self : Dict ): '''simple docstring''' A_ : List[Any] = """cpu""" A_ : List[str] = self.get_dummy_components() A_ : Tuple = self.pipeline_class(**_a ) A_ : Dict = pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) A_ : Tuple = pipe(**self.get_dummy_inputs(_a ) ) A_ : Tuple = output.images A_ : Optional[Any] = pipe( **self.get_dummy_inputs(_a ) ,return_dict=_a ,)[0] A_ : Tuple = image[0, -3:, -3:, -1] A_ : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A_ : List[Any] = np.array( [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def _a ( self : Tuple ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self : Any ): '''simple docstring''' A_ : Tuple = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" ) A_ : Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/hint_image_cat.png""" ) A_ : Optional[int] = torch.from_numpy(np.array(_a ) ).float() / 255.0 A_ : List[Any] = hint.permute(2 ,0 ,1 ).unsqueeze(0 ) A_ : List[Any] = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa ) pipe_prior.to(_a ) A_ : Union[str, Any] = KandinskyVaaControlnetPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-controlnet-depth""" ,torch_dtype=torch.floataa ) A_ : Union[str, Any] = pipeline.to(_a ) pipeline.set_progress_bar_config(disable=_a ) A_ : Optional[Any] = """A robot, 4k photo""" A_ : Any = torch.Generator(device="""cuda""" ).manual_seed(0 ) A_ , A_ : List[str] = pipe_prior( _a ,generator=_a ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple() A_ : int = torch.Generator(device="""cuda""" ).manual_seed(0 ) A_ : List[Any] = pipeline( image_embeds=_a ,negative_image_embeds=_a ,hint=_a ,generator=_a ,num_inference_steps=100 ,output_type="""np""" ,) A_ : Dict = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(_a ,_a )
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = ShapEImgaImgPipeline a_ = ["""image"""] a_ = ["""image"""] a_ = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] a_ = False @property def _a ( self : str ): '''simple docstring''' return 32 @property def _a ( self : str ): '''simple docstring''' return 32 @property def _a ( self : List[Any] ): '''simple docstring''' return self.time_input_dim * 4 @property def _a ( self : List[str] ): '''simple docstring''' return 8 @property def _a ( self : Dict ): '''simple docstring''' torch.manual_seed(0 ) A_ : Tuple = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size ,image_size=64 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=1 ,) A_ : Optional[Any] = CLIPVisionModel(_a ) return model @property def _a ( self : str ): '''simple docstring''' A_ : Tuple = CLIPImageProcessor( crop_size=224 ,do_center_crop=_a ,do_normalize=_a ,do_resize=_a ,image_mean=[0.48145466, 0.4578275, 0.40821073] ,image_std=[0.26862954, 0.26130258, 0.27577711] ,resample=3 ,size=224 ,) return image_processor @property def _a ( self : str ): '''simple docstring''' torch.manual_seed(0 ) A_ : Tuple = { """num_attention_heads""": 2, """attention_head_dim""": 16, """embedding_dim""": self.time_input_dim, """num_embeddings""": 32, """embedding_proj_dim""": self.text_embedder_hidden_size, """time_embed_dim""": self.time_embed_dim, """num_layers""": 1, """clip_embed_dim""": self.time_input_dim * 2, """additional_embeddings""": 0, """time_embed_act_fn""": """gelu""", """norm_in_type""": """layer""", """embedding_proj_norm_type""": """layer""", """encoder_hid_proj_type""": None, """added_emb_type""": None, } A_ : Optional[Any] = PriorTransformer(**_a ) return model @property def _a ( self : Union[str, Any] ): '''simple docstring''' torch.manual_seed(0 ) A_ : List[str] = { """param_shapes""": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), """d_latent""": self.time_input_dim, """d_hidden""": self.renderer_dim, """n_output""": 12, """background""": ( 0.1, 0.1, 0.1, ), } A_ : List[Any] = ShapERenderer(**_a ) return model def _a ( self : List[str] ): '''simple docstring''' A_ : str = self.dummy_prior A_ : Union[str, Any] = self.dummy_image_encoder A_ : List[Any] = self.dummy_image_processor A_ : Union[str, Any] = self.dummy_renderer A_ : List[Any] = HeunDiscreteScheduler( beta_schedule="""exp""" ,num_train_timesteps=1024 ,prediction_type="""sample""" ,use_karras_sigmas=_a ,clip_sample=_a ,clip_sample_range=1.0 ,) A_ : Tuple = { """prior""": prior, """image_encoder""": image_encoder, """image_processor""": image_processor, """renderer""": renderer, """scheduler""": scheduler, } return components def _a ( self : int ,_a : Dict ,_a : 
List[str]=0 ): '''simple docstring''' A_ : Union[str, Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_a ) ).to(_a ) if str(_a ).startswith("""mps""" ): A_ : Optional[int] = torch.manual_seed(_a ) else: A_ : List[str] = torch.Generator(device=_a ).manual_seed(_a ) A_ : Tuple = { """image""": input_image, """generator""": generator, """num_inference_steps""": 1, """frame_size""": 32, """output_type""": """np""", } return inputs def _a ( self : Optional[Any] ): '''simple docstring''' A_ : Optional[Any] = """cpu""" A_ : Tuple = self.get_dummy_components() A_ : Optional[int] = self.pipeline_class(**_a ) A_ : Optional[int] = pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) A_ : Optional[Any] = pipe(**self.get_dummy_inputs(_a ) ) A_ : Tuple = output.images[0] A_ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) A_ : Dict = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Optional[int] ): '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def _a ( self : List[Any] ): '''simple docstring''' A_ : int = torch_device == """cpu""" A_ : List[Any] = True self._test_inference_batch_single_identical( batch_size=2 ,test_max_difference=_a ,relax_max_difference=_a ,) def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : str = self.get_dummy_components() A_ : Optional[int] = self.pipeline_class(**_a ) A_ : List[str] = pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) A_ : str = 1 A_ : str = 2 A_ : List[Any] = self.get_dummy_inputs(_a ) for key in inputs.keys(): if key in self.batch_params: A_ : str = batch_size * [inputs[key]] A_ : Dict = pipe(**_a ,num_images_per_prompt=_a )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def _a ( self : Optional[Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self : List[Any] ): '''simple docstring''' A_ : Any = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" ) A_ : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/test_shap_e_img2img_out.npy""" ) A_ : Tuple = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" ) A_ : Union[str, Any] = pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) A_ : Dict = torch.Generator(device=_a ).manual_seed(0 ) A_ : Optional[Any] = pipe( _a ,generator=_a ,guidance_scale=3.0 ,num_inference_steps=64 ,frame_size=64 ,output_type="""np""" ,).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(_a ,_a )
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType


logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}


class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
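For illustration only (not in the upstream file), the config class in use:

# Hypothetical instantiation overriding a few of the defaults above.
config = DebertaV2Config(hidden_size=768, num_hidden_layers=12, num_attention_heads=12)
print(config.model_type)          # "deberta-v2"
print(config.pooler_hidden_size)  # defaults to hidden_size (768 here) unless passed in kwargs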
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
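To make the key renaming concrete, a small illustrative example (my addition; the key below is hypothetical):

print(rename_key("down_blocks.0.resnets.1.conv1.weight"))
# -> "down_blocks_0.resnets_1.conv1.weight": each "<name>.<digit>" segment
#    becomes "<name>_<digit>" so it can serve as a Flax module name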
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent


if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"User-Agent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)

    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]

    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
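Usage note, my addition: assuming the script is saved as crawler.py, run it as

    python crawler.py anything you want to search for

It dumps the raw response to project1a.html for inspection and opens the first five result links in the default browser.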
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class __lowerCAmelCase : '''simple docstring''' def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[Any]=7 ,_a : Optional[Any]=True ,_a : Any=True ,_a : Optional[int]=True ,_a : Union[str, Any]=99 ,_a : Union[str, Any]=32 ,_a : List[str]=5 ,_a : List[str]=4 ,_a : Dict=37 ,_a : List[Any]="gelu" ,_a : int=0.1 ,_a : Optional[int]=0.1 ,_a : Tuple=512 ,_a : Union[str, Any]=16 ,_a : Optional[Any]=2 ,_a : Optional[Any]=0.02 ,_a : Optional[int]=3 ,_a : str=4 ,_a : Optional[Any]=None ,): '''simple docstring''' A_ : Optional[Any] = parent A_ : str = batch_size A_ : int = seq_length A_ : Union[str, Any] = is_training A_ : Optional[Any] = use_token_type_ids A_ : int = use_labels A_ : Dict = vocab_size A_ : List[Any] = hidden_size A_ : Tuple = num_hidden_layers A_ : Optional[int] = num_attention_heads A_ : int = intermediate_size A_ : Tuple = hidden_act A_ : int = hidden_dropout_prob A_ : Dict = attention_probs_dropout_prob A_ : Any = max_position_embeddings A_ : Optional[Any] = type_vocab_size A_ : Tuple = type_sequence_label_size A_ : int = initializer_range A_ : Optional[Any] = num_labels A_ : str = num_choices A_ : Optional[Any] = scope A_ : List[Any] = self.vocab_size - 1 def _a ( self : Any ): '''simple docstring''' A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A_ : List[Any] = None if self.use_token_type_ids: A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) A_ : int = None A_ : str = None A_ : Union[str, Any] = None if self.use_labels: A_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A_ : Any = ids_tensor([self.batch_size] ,self.num_choices ) A_ : List[Any] = OpenAIGPTConfig( vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,) A_ : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def _a ( self : Optional[int] ,_a : List[str] ,_a : str ,_a : int ,_a : int ,*_a : Union[str, Any] ): '''simple docstring''' A_ : Optional[Any] = OpenAIGPTModel(config=_a ) model.to(_a ) model.eval() A_ : Optional[int] = model(_a ,token_type_ids=_a ,head_mask=_a ) A_ : str = model(_a ,token_type_ids=_a ) A_ : Dict = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : Dict ,_a : Optional[int] ,_a : Union[str, Any] ,_a : Dict ,_a : List[str] ,*_a : str ): '''simple docstring''' A_ : str = OpenAIGPTLMHeadModel(_a ) model.to(_a ) model.eval() A_ : Any = model(_a ,token_type_ids=_a ,labels=_a ) self.parent.assertEqual(result.loss.shape ,() ) 
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : Any ,_a : Dict ,_a : List[Any] ,_a : Dict ,_a : Union[str, Any] ,*_a : str ): '''simple docstring''' A_ : Any = OpenAIGPTDoubleHeadsModel(_a ) model.to(_a ) model.eval() A_ : Optional[int] = model(_a ,token_type_ids=_a ,labels=_a ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : List[str] ,_a : str ,_a : Tuple ,_a : Dict ,_a : Tuple ,*_a : Dict ): '''simple docstring''' A_ : List[str] = self.num_labels A_ : int = OpenAIGPTForSequenceClassification(_a ) model.to(_a ) model.eval() A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A_ : Optional[Any] = model(_a ,token_type_ids=_a ,labels=_a ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _a ( self : Tuple ): '''simple docstring''' A_ : Union[str, Any] = self.prepare_config_and_inputs() ( A_ ) : str = config_and_inputs A_ : int = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask, } return config, inputs_dict @require_torch class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) a_ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly a_ = ( { """feature-extraction""": OpenAIGPTModel, """text-classification""": OpenAIGPTForSequenceClassification, """text-generation""": OpenAIGPTLMHeadModel, """zero-shot""": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def _a ( self : Tuple ,_a : Optional[int] ,_a : str ,_a : List[str] ,_a : List[str] ,_a : Any ): '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def _a ( self : Optional[int] ,_a : str ,_a : Dict ,_a : Optional[int]=False ): '''simple docstring''' A_ : Any = super()._prepare_for_class(_a ,_a ,return_labels=_a ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": A_ : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=_a ,) A_ : Any = inputs_dict["""labels"""] A_ : Any = inputs_dict["""labels"""] A_ : Tuple = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=_a ,) A_ : int = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=_a ) return inputs_dict def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : Tuple = OpenAIGPTModelTester(self ) A_ : Optional[int] = ConfigTester(self ,config_class=_a ,n_embd=37 ) def _a ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def _a ( self : Optional[Any] ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*_a ) def _a ( self : Tuple ): '''simple docstring''' A_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*_a ) def _a ( self : List[Any] ): '''simple docstring''' A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*_a ) def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_a ) @slow def _a ( self : List[Any] ): '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Union[str, Any] = OpenAIGPTModel.from_pretrained(_a ) self.assertIsNotNone(_a ) @require_torch class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def _a ( self : List[str] ): '''simple docstring''' A_ : Dict = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" ) model.to(_a ) A_ : Dict = torch.tensor([[481, 4735, 544]] ,dtype=torch.long ,device=_a ) # the president is A_ : Dict = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the A_ : int = model.generate(_a ,do_sample=_a ) self.assertListEqual(output_ids[0].tolist() ,_a )
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
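A brief hedged instantiation example, not part of the file, using only the defaults defined above:

config = NezhaConfig()
print(config.model_type)             # "nezha"
print(config.max_relative_position)  # 64, the NEZHA-specific relative-position window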
27
0
'''simple docstring''' import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline""" def _a ( self : int ,_a : Optional[int]=0 ): '''simple docstring''' A_ : Any = np.random.RandomState(_a ) A_ : List[str] = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=_a ) A_ : Union[str, Any] = self.get_dummy_inputs() A_ : List[Any] = pipe(**_a ).images A_ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A_ : List[Any] = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Any ): '''simple docstring''' A_ : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) A_ : Any = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=_a ) pipe.set_progress_bar_config(disable=_a ) A_ : Dict = self.get_dummy_inputs() A_ : Union[str, Any] = pipe(**_a ).images A_ : int = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A_ : int = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Dict ): '''simple docstring''' A_ : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) A_ : Dict = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_a ) A_ : Optional[int] = self.get_dummy_inputs() A_ : Optional[int] = pipe(**_a ).images A_ : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A_ : int = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) A_ : List[str] = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_a ) A_ : Dict = self.get_dummy_inputs() A_ : Union[str, Any] = pipe(**_a ).images A_ : int = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A_ : Dict = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Optional[Any] ): '''simple docstring''' A_ : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint 
,provider="""CPUExecutionProvider""" ) A_ : Tuple = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_a ) A_ : Union[str, Any] = self.get_dummy_inputs() A_ : Any = pipe(**_a ).images A_ : str = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A_ : Tuple = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : List[Any] ): '''simple docstring''' A_ : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) A_ : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_a ) A_ : Tuple = self.get_dummy_inputs() A_ : int = pipe(**_a ).images A_ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A_ : Union[str, Any] = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=_a ) A_ : str = self.get_dummy_inputs() A_ : List[str] = 3 * [inputs["""prompt"""]] # forward A_ : Optional[Any] = pipe(**_a ) A_ : Any = output.images[0, -3:, -3:, -1] A_ : str = self.get_dummy_inputs() A_ : Tuple = 3 * [inputs.pop("""prompt""" )] A_ : List[Any] = pipe.tokenizer( _a ,padding="""max_length""" ,max_length=pipe.tokenizer.model_max_length ,truncation=_a ,return_tensors="""np""" ,) A_ : Union[str, Any] = text_inputs["""input_ids"""] A_ : Tuple = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] A_ : Optional[int] = prompt_embeds # forward A_ : int = pipe(**_a ) A_ : Union[str, Any] = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 def _a ( self : Dict ): '''simple docstring''' A_ : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=_a ) A_ : Tuple = self.get_dummy_inputs() A_ : Union[str, Any] = 3 * ["""this is a negative prompt"""] A_ : Any = negative_prompt A_ : Optional[Any] = 3 * [inputs["""prompt"""]] # forward A_ : Tuple = pipe(**_a ) A_ : Any = output.images[0, -3:, -3:, -1] A_ : List[Any] = self.get_dummy_inputs() A_ : List[str] = 3 * [inputs.pop("""prompt""" )] A_ : List[Any] = [] for p in [prompt, negative_prompt]: A_ : int = pipe.tokenizer( _a ,padding="""max_length""" ,max_length=pipe.tokenizer.model_max_length ,truncation=_a ,return_tensors="""np""" ,) A_ : Tuple = text_inputs["""input_ids"""] embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] ) A_ : Union[str, Any] = embeds # forward A_ : List[Any] = pipe(**_a ) A_ : str = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 @nightly @require_onnxruntime @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @property def _a ( self : Tuple ): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _a ( self : Any ): '''simple docstring''' A_ : Union[str, Any] = ort.SessionOptions() A_ : int = False return options def _a ( self : Any ): 
'''simple docstring''' A_ : Tuple = OnnxStableDiffusionPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" ,revision="""onnx""" ,safety_checker=_a ,feature_extractor=_a ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) sd_pipe.set_progress_bar_config(disable=_a ) A_ : Optional[int] = """A painting of a squirrel eating a burger""" np.random.seed(0 ) A_ : Dict = sd_pipe([prompt] ,guidance_scale=6.0 ,num_inference_steps=10 ,output_type="""np""" ) A_ : Tuple = output.images A_ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) A_ : Tuple = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _a ( self : Optional[int] ): '''simple docstring''' A_ : Dict = DDIMScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" ,subfolder="""scheduler""" ,revision="""onnx""" ) A_ : int = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" ,revision="""onnx""" ,scheduler=_a ,safety_checker=_a ,feature_extractor=_a ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) sd_pipe.set_progress_bar_config(disable=_a ) A_ : Dict = """open neural network exchange""" A_ : Optional[int] = np.random.RandomState(0 ) A_ : Union[str, Any] = sd_pipe([prompt] ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=_a ,output_type="""np""" ) A_ : Tuple = output.images A_ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) A_ : str = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _a ( self : Any ): '''simple docstring''' A_ : str = LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" ,subfolder="""scheduler""" ,revision="""onnx""" ) A_ : List[str] = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" ,revision="""onnx""" ,scheduler=_a ,safety_checker=_a ,feature_extractor=_a ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) sd_pipe.set_progress_bar_config(disable=_a ) A_ : Optional[int] = """open neural network exchange""" A_ : Optional[Any] = np.random.RandomState(0 ) A_ : int = sd_pipe([prompt] ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=_a ,output_type="""np""" ) A_ : str = output.images A_ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) A_ : str = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _a ( self : Any ): '''simple docstring''' A_ : int = 0 def test_callback_fn(_a : int ,_a : int ,_a : np.ndarray ) -> None: A_ : Any = True nonlocal number_of_steps number_of_steps += 1 if step == 0: assert latents.shape == (1, 4, 64, 64) A_ : Union[str, Any] = latents[0, -3:, -3:, -1] A_ : str = np.array( [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3 elif step == 5: assert latents.shape == (1, 4, 64, 64) A_ : Any = latents[0, -3:, -3:, -1] A_ : int = np.array( [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3 A_ : int = False A_ : int = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" ,revision="""onnx""" ,safety_checker=_a ,feature_extractor=_a ,provider=self.gpu_provider 
,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=_a ) A_ : List[str] = """Andromeda galaxy in a bottle""" A_ : List[Any] = np.random.RandomState(0 ) pipe( prompt=_a ,num_inference_steps=5 ,guidance_scale=7.5 ,generator=_a ,callback=_a ,callback_steps=1 ,) assert test_callback_fn.has_been_called assert number_of_steps == 6 def _a ( self : Dict ): '''simple docstring''' A_ : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" ,revision="""onnx""" ,safety_checker=_a ,feature_extractor=_a ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) assert isinstance(_a ,_a ) assert pipe.safety_checker is None A_ : List[str] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(_a ) A_ : Tuple = OnnxStableDiffusionPipeline.from_pretrained(_a ) # sanity check that the pipeline still works assert pipe.safety_checker is None A_ : int = pipe("""example prompt""" ,num_inference_steps=2 ).images[0] assert image is not None
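# Sketch of the pattern the fast tests above exercise: load the tiny ONNX
# Stable Diffusion checkpoint on CPU, swap in a different scheduler, and run two
# denoising steps. diffusers API; illustrative only.
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionPipeline

pipe = OnnxStableDiffusionPipeline.from_pretrained(
    "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline", provider="CPUExecutionProvider"
)
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
image = pipe(
    "A painting of a squirrel eating a burger",
    generator=np.random.RandomState(0),
    num_inference_steps=2,
    guidance_scale=7.5,
    output_type="numpy",
).images[0]  # shape (128, 128, 3) for this tiny checkpoint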
709
'''simple docstring'''
from __future__ import annotations


def lowerCamelCase ( lowerCamelCase : dict , lowerCamelCase : str):
    A_ , A_ : List[Any] = set(lowerCamelCase), [start]
    while stack:
        A_ : Optional[Any] = stack.pop()
        explored.add(lowerCamelCase)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(lowerCamelCase)
    return explored


__magic_name__ = {
    'A': ['B', 'C', 'D'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F'],
    'D': ['B', 'D'],
    'E': ['B', 'F'],
    'F': ['C', 'E', 'G'],
    'G': ['F'],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, 'A'))
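# Worked example for the graph above (deobfuscated name: depth_first_search).
# Starting from 'A' every vertex is reachable, so all seven nodes are returned.
# The result is a set, so traversal order is not recoverable from it, and a
# vertex can sit on the stack more than once because membership is only checked
# against `explored` at push time.
G = {'A': ['B', 'C', 'D'], 'B': ['A', 'D', 'E'], 'C': ['A', 'F'],
     'D': ['B', 'D'], 'E': ['B', 'F'], 'F': ['C', 'E', 'G'], 'G': ['F']}
assert depth_first_search(G, 'A') == {'A', 'B', 'C', 'D', 'E', 'F', 'G'}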
27
0
'''simple docstring''' import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer __magic_name__ = logging.get_logger(__name__) __magic_name__ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __magic_name__ = { 'vocab_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } __magic_name__ = { 'vocab_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } __magic_name__ = { 'vocab_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json' ), }, } __magic_name__ = { 'facebook/dpr-ctx_encoder-single-nq-base': 512, 'facebook/dpr-ctx_encoder-multiset-base': 512, } __magic_name__ = { 'facebook/dpr-question_encoder-single-nq-base': 512, 'facebook/dpr-question_encoder-multiset-base': 512, } __magic_name__ = { 'facebook/dpr-reader-single-nq-base': 512, 'facebook/dpr-reader-multiset-base': 512, } __magic_name__ = { 'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True}, } __magic_name__ = { 'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True}, } __magic_name__ = { 'facebook/dpr-reader-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-reader-multiset-base': {'do_lower_case': True}, } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = VOCAB_FILES_NAMES a_ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP a_ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION a_ = DPRContextEncoderTokenizer class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = VOCAB_FILES_NAMES a_ 
= QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP a_ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION a_ = DPRQuestionEncoderTokenizer __magic_name__ = collections.namedtuple( 'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text'] ) __magic_name__ = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) __magic_name__ = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. 
This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n ' @add_start_docstrings(__SCREAMING_SNAKE_CASE ) class __lowerCAmelCase : '''simple docstring''' def __call__( self : Any ,_a : List[str] ,_a : Optional[str] = None ,_a : Optional[str] = None ,_a : Union[bool, str] = False ,_a : Union[bool, str] = False ,_a : Optional[int] = None ,_a : Optional[Union[str, TensorType]] = None ,_a : Optional[bool] = None ,**_a : Tuple ,): '''simple docstring''' if titles is None and texts is None: return super().__call__( _a ,padding=_a ,truncation=_a ,max_length=_a ,return_tensors=_a ,return_attention_mask=_a ,**_a ,) elif titles is None or texts is None: A_ : int = titles if texts is None else texts return super().__call__( _a ,_a ,padding=_a ,truncation=_a ,max_length=_a ,return_tensors=_a ,return_attention_mask=_a ,**_a ,) A_ : List[Any] = titles if not isinstance(_a ,_a ) else [titles] A_ : Dict = texts if not isinstance(_a ,_a ) else [texts] A_ : Tuple = len(_a ) A_ : Optional[Any] = questions if not isinstance(_a ,_a ) else [questions] * n_passages assert len(_a ) == len( _a ), f'There should be as many titles than texts but got {len(_a )} titles and {len(_a )} texts.' 
A_ : str = super().__call__(_a ,_a ,padding=_a ,truncation=_a )["""input_ids"""] A_ : Union[str, Any] = super().__call__(_a ,add_special_tokens=_a ,padding=_a ,truncation=_a )["""input_ids"""] A_ : str = { """input_ids""": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(_a ,_a ) ] } if return_attention_mask is not False: A_ : List[str] = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) A_ : int = attention_mask return self.pad(_a ,padding=_a ,max_length=_a ,return_tensors=_a ) def _a ( self : Optional[int] ,_a : BatchEncoding ,_a : DPRReaderOutput ,_a : int = 16 ,_a : int = 64 ,_a : int = 4 ,): '''simple docstring''' A_ : Dict = reader_input["""input_ids"""] A_ : Optional[int] = reader_output[:3] A_ : Dict = len(_a ) A_ : Dict = sorted(range(_a ) ,reverse=_a ,key=relevance_logits.__getitem__ ) A_ : List[DPRReaderOutput] = [] for doc_id in sorted_docs: A_ : Tuple = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence A_ : List[Any] = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: A_ : List[str] = sequence_ids.index(self.pad_token_id ) else: A_ : Union[str, Any] = len(_a ) A_ : Tuple = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_a ,top_spans=_a ,) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_a ,start_index=_a ,end_index=_a ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) ) if len(_a ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _a ( self : Optional[int] ,_a : List[int] ,_a : List[int] ,_a : int ,_a : int ,): '''simple docstring''' A_ : List[str] = [] for start_index, start_score in enumerate(_a ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) A_ : Any = sorted(_a ,key=lambda _a : x[1] ,reverse=_a ) A_ : List[str] = [] for (start_index, end_index), score in scores: assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]' A_ : List[str] = end_index - start_index + 1 assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(_a ) == top_spans: break return chosen_span_intervals @add_end_docstrings(__SCREAMING_SNAKE_CASE ) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = VOCAB_FILES_NAMES a_ = READER_PRETRAINED_VOCAB_FILES_MAP a_ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = READER_PRETRAINED_INIT_CONFIGURATION a_ = ["""input_ids""", """attention_mask"""] a_ = DPRReaderTokenizer
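# Sketch of the intended call pattern for the reader tokenizer defined above
# (upstream: DPRReaderTokenizerFast). __call__ builds
# [CLS] question [SEP] title [SEP] text sequences; decode_best_spans ranks
# answer spans from a DPRReader's outputs. Checkpoint names are the public ones;
# the rest is illustrative.
from transformers import DPRReader, DPRReaderTokenizerFast

tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
encoded_inputs = tokenizer(
    questions=["What is love?"],
    titles=["Haddaway"],
    texts=["'What Is Love' is a song recorded by the artist Haddaway"],
    return_tensors="pt",
)
outputs = model(**encoded_inputs)
predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
print(predicted_spans[0].text)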
710
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __magic_name__ = logging.get_logger(__name__) def lowerCamelCase ( lowerCamelCase : Dict): A_ : List[str] = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: A_ : Union[str, Any] = [144, 192, 240] A_ : int = [16, 32, 64, 96, 128, 160, 640] elif "mobilevit_xs" in mobilevit_name: A_ : List[str] = [96, 120, 144] A_ : Any = [16, 32, 48, 64, 80, 96, 384] elif "mobilevit_xxs" in mobilevit_name: A_ : Any = [64, 80, 96] A_ : List[str] = [16, 16, 24, 48, 64, 80, 320] A_ : Any = 0.05 A_ : List[Any] = 2.0 if mobilevit_name.startswith("""deeplabv3_"""): A_ : int = 512 A_ : Optional[int] = 16 A_ : List[Any] = 21 A_ : List[str] = """pascal-voc-id2label.json""" else: A_ : str = 1000 A_ : Any = """imagenet-1k-id2label.json""" A_ : Any = """huggingface/label-files""" A_ : List[str] = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""") , """r""")) A_ : str = {int(lowerCamelCase): v for k, v in idalabel.items()} A_ : Any = idalabel A_ : List[str] = {v: k for k, v in idalabel.items()} return config def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int=False): for i in range(1 , 6): if F'layer_{i}.' in name: A_ : Tuple = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.') if "conv_1." in name: A_ : Union[str, Any] = name.replace("""conv_1.""" , """conv_stem.""") if ".block." in name: A_ : Optional[Any] = name.replace(""".block.""" , """.""") if "exp_1x1" in name: A_ : Union[str, Any] = name.replace("""exp_1x1""" , """expand_1x1""") if "red_1x1" in name: A_ : int = name.replace("""red_1x1""" , """reduce_1x1""") if ".local_rep.conv_3x3." in name: A_ : List[str] = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""") if ".local_rep.conv_1x1." in name: A_ : Optional[int] = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""") if ".norm." in name: A_ : Tuple = name.replace(""".norm.""" , """.normalization.""") if ".conv." in name: A_ : List[Any] = name.replace(""".conv.""" , """.convolution.""") if ".conv_proj." in name: A_ : str = name.replace(""".conv_proj.""" , """.conv_projection.""") for i in range(0 , 2): for j in range(0 , 4): if F'.{i}.{j}.' in name: A_ : Tuple = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.') for i in range(2 , 6): for j in range(0 , 4): if F'.{i}.{j}.' in name: A_ : Dict = name.replace(F'.{i}.{j}.' , F'.{i}.') if "expand_1x1" in name: A_ : Union[str, Any] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""") if "conv_3x3" in name: A_ : str = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""") if "reduce_1x1" in name: A_ : Union[str, Any] = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""") for i in range(2 , 5): if F'.global_rep.{i}.weight' in name: A_ : List[Any] = name.replace(F'.global_rep.{i}.weight' , """.layernorm.weight""") if F'.global_rep.{i}.bias' in name: A_ : Optional[int] = name.replace(F'.global_rep.{i}.bias' , """.layernorm.bias""") if ".global_rep." in name: A_ : Optional[Any] = name.replace(""".global_rep.""" , """.transformer.""") if ".pre_norm_mha.0." 
in name: A_ : int = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""") if ".pre_norm_mha.1.out_proj." in name: A_ : Dict = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""") if ".pre_norm_ffn.0." in name: A_ : Dict = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""") if ".pre_norm_ffn.1." in name: A_ : Any = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""") if ".pre_norm_ffn.4." in name: A_ : Union[str, Any] = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""") if ".transformer." in name: A_ : Any = name.replace(""".transformer.""" , """.transformer.layer.""") if ".aspp_layer." in name: A_ : int = name.replace(""".aspp_layer.""" , """.""") if ".aspp_pool." in name: A_ : Tuple = name.replace(""".aspp_pool.""" , """.""") if "seg_head." in name: A_ : Optional[int] = name.replace("""seg_head.""" , """segmentation_head.""") if "segmentation_head.classifier.classifier." in name: A_ : List[str] = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""") if "classifier.fc." in name: A_ : str = name.replace("""classifier.fc.""" , """classifier.""") elif (not base_model) and ("segmentation_head." not in name): A_ : str = """mobilevit.""" + name return name def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int]=False): if base_model: A_ : Dict = """""" else: A_ : Any = """mobilevit.""" for key in orig_state_dict.copy().keys(): A_ : List[Any] = orig_state_dict.pop(lowerCamelCase) if key[:8] == "encoder.": A_ : int = key[8:] if "qkv" in key: A_ : Any = key.split(""".""") A_ : str = int(key_split[0][6:]) - 1 A_ : int = int(key_split[3]) A_ : Optional[Any] = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}') A_ : Tuple = layer.transformer.layer[transformer_num].attention.attention.all_head_size A_ : Optional[Any] = ( F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.' 
) if "weight" in key: A_ : Dict = val[:dim, :] A_ : Optional[int] = val[dim : dim * 2, :] A_ : List[Any] = val[-dim:, :] else: A_ : Optional[Any] = val[:dim] A_ : List[Any] = val[dim : dim * 2] A_ : Any = val[-dim:] else: A_ : List[str] = val return orig_state_dict def lowerCamelCase ( ): A_ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" A_ : Dict = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase).raw) return im @torch.no_grad() def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int=False): A_ : Optional[Any] = get_mobilevit_config(lowerCamelCase) # load original state_dict A_ : List[Any] = torch.load(lowerCamelCase , map_location="""cpu""") # load 🤗 model if mobilevit_name.startswith("""deeplabv3_"""): A_ : List[str] = MobileViTForSemanticSegmentation(lowerCamelCase).eval() else: A_ : str = MobileViTForImageClassification(lowerCamelCase).eval() A_ : str = convert_state_dict(lowerCamelCase , lowerCamelCase) model.load_state_dict(lowerCamelCase) # Check outputs on an image, prepared by MobileViTImageProcessor A_ : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32) A_ : Any = image_processor(images=prepare_img() , return_tensors="""pt""") A_ : List[Any] = model(**lowerCamelCase) A_ : Dict = outputs.logits if mobilevit_name.startswith("""deeplabv3_"""): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": A_ : int = torch.tensor( [ [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]], [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]], [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]], ]) elif mobilevit_name == "deeplabv3_mobilevit_xs": A_ : Tuple = torch.tensor( [ [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]], [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]], [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]], ]) elif mobilevit_name == "deeplabv3_mobilevit_xxs": A_ : Tuple = torch.tensor( [ [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]], [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]], [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]], ]) else: raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}') assert torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-4) else: assert logits.shape == (1, 1000) if mobilevit_name == "mobilevit_s": A_ : Tuple = torch.tensor([-0.9866, 0.2392, -1.1241]) elif mobilevit_name == "mobilevit_xs": A_ : Any = torch.tensor([-2.4761, -0.9399, -1.9587]) elif mobilevit_name == "mobilevit_xxs": A_ : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653]) else: raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}') assert torch.allclose(logits[0, :3] , lowerCamelCase , atol=1E-4) Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase) print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}') model.save_pretrained(lowerCamelCase) print(F'Saving image processor to {pytorch_dump_folder_path}') image_processor.save_pretrained(lowerCamelCase) if push_to_hub: A_ : str = { """mobilevit_s""": """mobilevit-small""", """mobilevit_xs""": """mobilevit-x-small""", """mobilevit_xxs""": """mobilevit-xx-small""", 
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""", """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""", """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""", } print("""Pushing to the hub...""") A_ : Union[str, Any] = model_mapping[mobilevit_name] image_processor.push_to_hub(lowerCamelCase , organization="""apple""") model.push_to_hub(lowerCamelCase , organization="""apple""") if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--mobilevit_name', default='mobilevit_s', type=str, help=( 'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\',' ' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.' ), ) parser.add_argument( '--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).' ) parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __magic_name__ = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
27
0
'''simple docstring'''
from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def lowerCamelCase ( lowerCamelCase : Tuple):
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code)


class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''

    @staticmethod
    def _a ( _a : ArgumentParser ):
        '''simple docstring'''
        A_ : Any = parser.add_parser("""download""" )
        download_parser.add_argument(
            """--cache-dir""" ,type=_a ,default=_a ,help="""Path to location to store the models""" )
        download_parser.add_argument(
            """--force""" ,action="""store_true""" ,help="""Force the model to be download even if already in cache-dir""" )
        download_parser.add_argument(
            """--trust-remote-code""" ,action="""store_true""" ,help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" ,)
        download_parser.add_argument("""model""" ,type=_a ,help="""Name of the model to download""" )
        download_parser.set_defaults(func=_a )

    def __init__( self : List[str] ,_a : str ,_a : str ,_a : bool ,_a : bool ):
        '''simple docstring'''
        A_ : int = model
        A_ : Tuple = cache
        A_ : Tuple = force
        A_ : str = trust_remote_code

    def _a ( self : str ):
        '''simple docstring'''
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
        AutoTokenizer.from_pretrained(
            self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
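# Example invocation of the download command defined above, assuming the
# standard transformers-cli entry point (the model name is just an example):
#
#   transformers-cli download bert-base-uncased --cache-dir ./models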
711
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __magic_name__ = logging.get_logger(__name__) if is_vision_available(): import PIL class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""pixel_values"""] def __init__( self : Optional[Any] ,_a : bool = True ,_a : Dict[str, int] = None ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : bool = True ,_a : Dict[str, int] = None ,_a : bool = True ,_a : Union[int, float] = 1 / 255 ,_a : bool = True ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = True ,**_a : Dict ,): '''simple docstring''' super().__init__(**_a ) A_ : Tuple = size if size is not None else {"""shortest_edge""": 224} A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ) A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ,param_name="""crop_size""" ) A_ : Any = do_resize A_ : List[str] = size A_ : Union[str, Any] = resample A_ : Dict = do_center_crop A_ : List[str] = crop_size A_ : Any = do_rescale A_ : Union[str, Any] = rescale_factor A_ : Any = do_normalize A_ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN A_ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD A_ : Tuple = do_convert_rgb def _a ( self : Optional[int] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[Any] ,): '''simple docstring''' A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ) if "shortest_edge" not in size: raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) A_ : Tuple = get_resize_output_image_size(_a ,size=size["""shortest_edge"""] ,default_to_square=_a ) return resize(_a ,size=_a ,resample=_a ,data_format=_a ,**_a ) def _a ( self : List[Any] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[int] ,): '''simple docstring''' A_ : Optional[int] = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(f'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(_a ,size=(size["""height"""], size["""width"""]) ,data_format=_a ,**_a ) def _a ( self : Any ,_a : np.ndarray ,_a : Union[int, float] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Any ,): '''simple docstring''' return rescale(_a ,scale=_a ,data_format=_a ,**_a ) def _a ( self : Any ,_a : np.ndarray ,_a : Union[float, List[float]] ,_a : Union[float, List[float]] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[str] ,): '''simple docstring''' return normalize(_a ,mean=_a ,std=_a ,data_format=_a ,**_a ) def _a ( self : Optional[Any] ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : int = None ,_a : bool = None ,_a : float = None ,_a : bool = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = None ,_a : Optional[Union[str, TensorType]] = None ,_a : Optional[ChannelDimension] = ChannelDimension.FIRST ,**_a : int ,): '''simple docstring''' A_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize A_ : Tuple = size if size is not None else self.size A_ : Optional[int] = get_size_dict(_a ,param_name="""size""" ,default_to_square=_a ) A_ : List[str] = resample if resample is not None else self.resample A_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop A_ : Any = crop_size if crop_size is not None else self.crop_size A_ : int = get_size_dict(_a ,param_name="""crop_size""" ,default_to_square=_a ) A_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale A_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor A_ : Any = do_normalize if do_normalize is not None else self.do_normalize A_ : int = image_mean if image_mean is not None else self.image_mean A_ : int = image_std if image_std is not None else self.image_std A_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb A_ : int = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: A_ : Optional[int] = [convert_to_rgb(_a ) for image in images] # All transformations expect numpy arrays. A_ : Dict = [to_numpy_array(_a ) for image in images] if do_resize: A_ : int = [self.resize(image=_a ,size=_a ,resample=_a ) for image in images] if do_center_crop: A_ : Tuple = [self.center_crop(image=_a ,size=_a ) for image in images] if do_rescale: A_ : List[str] = [self.rescale(image=_a ,scale=_a ) for image in images] if do_normalize: A_ : Any = [self.normalize(image=_a ,mean=_a ,std=_a ) for image in images] A_ : List[str] = [to_channel_dimension_format(_a ,_a ) for image in images] A_ : List[str] = {"""pixel_values""": images} return BatchFeature(data=_a ,tensor_type=_a )
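# Sketch of using a CLIP-style image processor like the one defined above
# (upstream: CLIPImageProcessor); the checkpoint name is illustrative.
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.new("RGB", (640, 480))
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224]) with the default crop size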
27
0
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Tuple):
    A_ : List[str] = """"""
    for i in table:
        res += inp[i - 1]
    return res


def lowerCamelCase ( lowerCamelCase : List[Any]):
    return data[1:] + data[0]


def lowerCamelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any]):
    A_ : List[Any] = """"""
    for i in range(len(lowerCamelCase)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : int):
    A_ : str = int("""0b""" + data[0] + data[-1] , 2)
    A_ : Tuple = int("""0b""" + data[1:3] , 2)
    return bin(s[row][col])[2:]


def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Tuple , lowerCamelCase : List[Any] , lowerCamelCase : int , lowerCamelCase : Tuple):
    A_ : Any = message[:4]
    A_ : Optional[Any] = message[4:]
    A_ : Any = apply_table(lowerCamelCase , lowerCamelCase)
    A_ : str = xor(lowerCamelCase , lowerCamelCase)
    A_ : Tuple = apply_sbox(lowerCamelCase , temp[:4])  # noqa: E741
    A_ : Any = apply_sbox(lowerCamelCase , temp[4:])
    A_ : List[str] = """0""" * (2 - len(lowerCamelCase)) + l  # noqa: E741
    A_ : Tuple = """0""" * (2 - len(lowerCamelCase)) + r
    A_ : Optional[Any] = apply_table(l + r , lowerCamelCase)
    A_ : Union[str, Any] = xor(lowerCamelCase , lowerCamelCase)
    return temp + right


if __name__ == "__main__":
    __magic_name__ = input('Enter 10 bit key: ')
    __magic_name__ = input('Enter 8 bit message: ')

    __magic_name__ = [6, 3, 7, 4, 8, 5, 10, 9]
    __magic_name__ = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    __magic_name__ = [2, 4, 3, 1]
    __magic_name__ = [2, 6, 3, 1, 4, 8, 5, 7]
    __magic_name__ = [4, 1, 3, 5, 7, 2, 8, 6]
    __magic_name__ = [4, 1, 2, 3, 2, 3, 4, 1]
    __magic_name__ = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    __magic_name__ = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    __magic_name__ = apply_table(key, paa_table)
    __magic_name__ = temp[:5]
    __magic_name__ = temp[5:]
    __magic_name__ = left_shift(left)
    __magic_name__ = left_shift(right)
    __magic_name__ = apply_table(left + right, pa_table)
    __magic_name__ = left_shift(left)
    __magic_name__ = left_shift(right)
    __magic_name__ = left_shift(left)
    __magic_name__ = left_shift(right)
    __magic_name__ = apply_table(left + right, pa_table)

    # encryption
    __magic_name__ = apply_table(message, IP)
    __magic_name__ = function(expansion, sa, sa, keya, temp)
    __magic_name__ = temp[4:] + temp[:4]
    __magic_name__ = function(expansion, sa, sa, keya, temp)
    __magic_name__ = apply_table(temp, IP_inv)
    print('Cipher text is:', CT)

    # decryption
    __magic_name__ = apply_table(CT, IP)
    __magic_name__ = function(expansion, sa, sa, keya, temp)
    __magic_name__ = temp[4:] + temp[:4]
    __magic_name__ = function(expansion, sa, sa, keya, temp)
    __magic_name__ = apply_table(temp, IP_inv)
    print('Plain text after decrypting is:', PT)
712
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor


__magic_name__ = logging.get_logger(__name__)


class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''

    def __init__( self : Union[str, Any] ,*_a : Optional[Any] ,**_a : Optional[int] ):
        '''simple docstring'''
        warnings.warn(
            """The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use OwlViTImageProcessor instead.""" ,_a ,)
        super().__init__(*_a ,**_a )
27
0
'''simple docstring'''
import fire

from utils import calculate_rouge, save_json


def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[str] , lowerCamelCase : Dict=None , **lowerCamelCase : Any):
    A_ : int = [x.strip() for x in open(lowerCamelCase).readlines()]
    A_ : Tuple = [x.strip() for x in open(lowerCamelCase).readlines()][: len(lowerCamelCase)]
    A_ : Tuple = calculate_rouge(lowerCamelCase , lowerCamelCase , **lowerCamelCase)
    if save_path is not None:
        save_json(lowerCamelCase , lowerCamelCase , indent=lowerCamelCase)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
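# Example invocation (upstream this is the seq2seq examples' rouge_cli.py; the
# file names are placeholders):
#
#   python rouge_cli.py predictions.txt references.txt --save_path rouge.json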
713
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : complex , lowerCamelCase : str = "x" , lowerCamelCase : float = 10**-10 , lowerCamelCase : int = 1 , ):
    A_ : int = symbols(lowerCamelCase)
    A_ : List[Any] = lambdify(lowerCamelCase , lowerCamelCase)
    A_ : List[str] = lambdify(lowerCamelCase , diff(lowerCamelCase , lowerCamelCase))
    A_ : str = starting_point
    while True:
        if diff_function(lowerCamelCase) != 0:
            A_ : int = prev_guess - multiplicity * func(lowerCamelCase) / diff_function(
                lowerCamelCase)
        else:
            raise ZeroDivisionError("""Could not find root""") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        A_ : Union[str, Any] = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
    # Find root of polynomial
    # Find fourth Root of 5
    print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
    # Find value of e
    print(
        'The root of log(y) - 1 = 0 is ',
        f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
    )
    # Exponential Roots
    print(
        'The root of exp(x) - 1 = 0 is',
        f"""{newton_raphson('exp(x) - 1', 10, precision=0.0_0_5)}""",
    )
    # Find root of cos(x)
    print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
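# The update implemented above is the multiplicity-aware Newton-Raphson step
#
#     x_{n+1} = x_n - m * f(x_n) / f'(x_n)
#
# where m (the `multiplicity` argument) speeds up convergence on a root of
# multiplicity m; with m = 1 it reduces to the classical method.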
27
0
'''simple docstring''' from ..utils import DummyObject, requires_backends class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""onnx"""] def __init__( self : Any ,*_a : int ,**_a : Optional[Any] ): '''simple docstring''' requires_backends(self ,["""onnx"""] ) @classmethod def _a ( cls : List[Any] ,*_a : Tuple ,**_a : str ): '''simple docstring''' requires_backends(cls ,["""onnx"""] ) @classmethod def _a ( cls : str ,*_a : List[Any] ,**_a : List[str] ): '''simple docstring''' requires_backends(cls ,["""onnx"""] )
714
'''simple docstring''' import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset __magic_name__ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class __lowerCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self : Dict ,_a : Dict ): '''simple docstring''' super().__init__() A_ : List[str] = torchvision.models.resnetaaa(pretrained=_a ) A_ : int = list(model.children() )[:-2] A_ : int = nn.Sequential(*_a ) A_ : Optional[int] = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def _a ( self : str ,_a : Optional[int] ): '''simple docstring''' A_ : Tuple = self.pool(self.model(_a ) ) A_ : Any = torch.flatten(_a ,start_dim=2 ) A_ : str = out.transpose(1 ,2 ).contiguous() return out # BxNx2048 class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : int ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Dict ,_a : Dict ,_a : Optional[Any] ): '''simple docstring''' A_ : Dict = [json.loads(_a ) for l in open(_a )] A_ : Optional[int] = os.path.dirname(_a ) A_ : Optional[Any] = tokenizer A_ : Optional[Any] = labels A_ : List[Any] = len(_a ) A_ : str = max_seq_length A_ : str = transforms def __len__( self : str ): '''simple docstring''' return len(self.data ) def __getitem__( self : Tuple ,_a : Optional[Any] ): '''simple docstring''' A_ : Optional[int] = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] ,add_special_tokens=_a ) ) A_ , A_ , A_ : Dict = sentence[0], sentence[1:-1], sentence[-1] A_ : Optional[int] = sentence[: self.max_seq_length] A_ : Any = torch.zeros(self.n_classes ) A_ : Tuple = 1 A_ : Optional[Any] = Image.open(os.path.join(self.data_dir ,self.data[index]["""img"""] ) ).convert("""RGB""" ) A_ : Union[str, Any] = self.transforms(_a ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def _a ( self : List[Any] ): '''simple docstring''' A_ : str = Counter() for row in self.data: label_freqs.update(row["""label"""] ) return label_freqs def lowerCamelCase ( lowerCamelCase : str): A_ : List[Any] = [len(row["""sentence"""]) for row in batch] A_ , A_ : Dict = len(lowerCamelCase), max(lowerCamelCase) A_ : Optional[int] = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long) A_ : Tuple = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long) for i_batch, (input_row, length) in enumerate(zip(lowerCamelCase , lowerCamelCase)): A_ : str = input_row["""sentence"""] A_ : Tuple = 1 A_ : int = torch.stack([row["""image"""] for row in batch]) A_ : str = torch.stack([row["""label"""] for row in batch]) A_ : List[Any] = torch.stack([row["""image_start_token"""] for row in batch]) A_ : Tuple = torch.stack([row["""image_end_token"""] for row in batch]) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def lowerCamelCase ( ): return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def lowerCamelCase ( ): return transforms.Compose( [ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize( mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 
0.1214_5835, 0.1438_0469] , ), ])
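# Sketch of wiring the dataset and collate function above together, assuming the
# upstream (deobfuscated) names JsonlDataset, collate_fn, get_image_transforms
# and get_mmimdb_labels; the path and batch size are placeholders.
from torch.utils.data import DataLoader
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
labels = get_mmimdb_labels()
dataset = JsonlDataset("train.jsonl", tokenizer, get_image_transforms(), labels, 512)
loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)
text, mask, image, img_start, img_end, targets = next(iter(loader))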
27
0
'''simple docstring''' import os from argparse import ArgumentParser, Namespace from ..data import SingleSentenceClassificationProcessor as Processor from ..pipelines import TextClassificationPipeline from ..utils import is_tf_available, is_torch_available, logging from . import BaseTransformersCLICommand if not is_tf_available() and not is_torch_available(): raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training') # TF training parameters __magic_name__ = False __magic_name__ = False def lowerCamelCase ( lowerCamelCase : Namespace): return TrainCommand(lowerCamelCase) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' @staticmethod def _a ( _a : ArgumentParser ): '''simple docstring''' A_ : Union[str, Any] = parser.add_parser("""train""" ,help="""CLI tool to train a model on a task.""" ) train_parser.add_argument( """--train_data""" ,type=_a ,required=_a ,help="""path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.""" ,) train_parser.add_argument( """--column_label""" ,type=_a ,default=0 ,help="""Column of the dataset csv file with example labels.""" ) train_parser.add_argument( """--column_text""" ,type=_a ,default=1 ,help="""Column of the dataset csv file with example texts.""" ) train_parser.add_argument( """--column_id""" ,type=_a ,default=2 ,help="""Column of the dataset csv file with example ids.""" ) train_parser.add_argument( """--skip_first_row""" ,action="""store_true""" ,help="""Skip the first row of the csv file (headers).""" ) train_parser.add_argument("""--validation_data""" ,type=_a ,default="""""" ,help="""path to validation dataset.""" ) train_parser.add_argument( """--validation_split""" ,type=_a ,default=0.1 ,help="""if validation dataset is not provided, fraction of train dataset to use as validation dataset.""" ,) train_parser.add_argument("""--output""" ,type=_a ,default="""./""" ,help="""path to saved the trained model.""" ) train_parser.add_argument( """--task""" ,type=_a ,default="""text_classification""" ,help="""Task to train the model on.""" ) train_parser.add_argument( """--model""" ,type=_a ,default="""bert-base-uncased""" ,help="""Model's name or path to stored model.""" ) train_parser.add_argument("""--train_batch_size""" ,type=_a ,default=32 ,help="""Batch size for training.""" ) train_parser.add_argument("""--valid_batch_size""" ,type=_a ,default=64 ,help="""Batch size for validation.""" ) train_parser.add_argument("""--learning_rate""" ,type=_a ,default=3e-5 ,help="""Learning rate.""" ) train_parser.add_argument("""--adam_epsilon""" ,type=_a ,default=1e-08 ,help="""Epsilon for Adam optimizer.""" ) train_parser.set_defaults(func=_a ) def __init__( self : Optional[Any] ,_a : Namespace ): '''simple docstring''' A_ : int = logging.get_logger("""transformers-cli/training""" ) A_ : Optional[Any] = """tf""" if is_tf_available() else """torch""" os.makedirs(args.output ,exist_ok=_a ) A_ : Any = args.output A_ : str = args.column_label A_ : List[Any] = args.column_text A_ : Dict = args.column_id self.logger.info(f'Loading {args.task} pipeline for {args.model}' ) if args.task == "text_classification": A_ : List[Any] = TextClassificationPipeline.from_pretrained(args.model ) elif args.task == "token_classification": raise NotImplementedError elif args.task == "question_answering": raise NotImplementedError self.logger.info(f'Loading dataset from {args.train_data}' ) A_ : Tuple = Processor.create_from_csv( args.train_data 
,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,) A_ : List[Any] = None if args.validation_data: self.logger.info(f'Loading validation dataset from {args.validation_data}' ) A_ : Tuple = Processor.create_from_csv( args.validation_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,) A_ : int = args.validation_split A_ : Optional[Any] = args.train_batch_size A_ : List[Any] = args.valid_batch_size A_ : List[Any] = args.learning_rate A_ : Optional[int] = args.adam_epsilon def _a ( self : Optional[int] ): '''simple docstring''' if self.framework == "tf": return self.run_tf() return self.run_torch() def _a ( self : List[Any] ): '''simple docstring''' raise NotImplementedError def _a ( self : Dict ): '''simple docstring''' self.pipeline.fit( self.train_dataset ,validation_data=self.valid_dataset ,validation_split=self.validation_split ,learning_rate=self.learning_rate ,adam_epsilon=self.adam_epsilon ,train_batch_size=self.train_batch_size ,valid_batch_size=self.valid_batch_size ,) # Save trained pipeline self.pipeline.save_pretrained(self.output )
'''simple docstring'''
from __future__ import annotations

import math


def lowerCamelCase ( lowerCamelCase : int):
    if num <= 0:
        A_ : List[Any] = F'{num}: Invalid input, please enter a positive integer.'
        raise ValueError(lowerCamelCase)
    A_ : str = [True] * (num + 1)
    A_ : Tuple = []
    A_ : str = 2
    A_ : Any = int(math.sqrt(lowerCamelCase))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(lowerCamelCase)
            # Set multiples of start be False
            for i in range(start * start , num + 1 , lowerCamelCase):
                if sieve[i] is True:
                    A_ : Union[str, Any] = False
        start += 1
    for j in range(end + 1 , num + 1):
        if sieve[j] is True:
            prime.append(lowerCamelCase)
    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input('Enter a positive integer: ').strip())))
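Because the dump's renaming makes the listing above hard to run as-is (locals are rebound to A_ but read back under their original names), here is a readable sketch of the same sieve with a quick sanity check.

import math

def prime_sieve_sketch(num: int) -> list[int]:
    # Mirror of the logic above with the intended variable names restored.
    if num <= 0:
        raise ValueError(f"{num}: Invalid input, please enter a positive integer.")
    sieve = [True] * (num + 1)
    primes = []
    start, end = 2, int(math.sqrt(num))
    while start <= end:
        if sieve[start]:
            primes.append(start)
            for i in range(start * start, num + 1, start):
                sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j]:
            primes.append(j)
    return primes

assert prime_sieve_sketch(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]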
'''simple docstring''' import os import string import sys __magic_name__ = 1 << 8 __magic_name__ = { 'tab': ord('\t'), 'newline': ord('\r'), 'esc': 27, 'up': 65 + ARROW_KEY_FLAG, 'down': 66 + ARROW_KEY_FLAG, 'right': 67 + ARROW_KEY_FLAG, 'left': 68 + ARROW_KEY_FLAG, 'mod_int': 91, 'undefined': sys.maxsize, 'interrupt': 3, 'insert': 50, 'delete': 51, 'pg_up': 53, 'pg_down': 54, } __magic_name__ = KEYMAP['up'] __magic_name__ = KEYMAP['left'] if sys.platform == "win32": __magic_name__ = [] __magic_name__ = { b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG, b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG, b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG, b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG, b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG, b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG, b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG, b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG, } for i in range(10): __magic_name__ = ord(str(i)) def lowerCamelCase ( ): if os.name == "nt": import msvcrt A_ : List[Any] = """mbcs""" # Flush the keyboard buffer while msvcrt.kbhit(): msvcrt.getch() if len(lowerCamelCase) == 0: # Read the keystroke A_ : Optional[int] = msvcrt.getch() # If it is a prefix char, get second part if ch in (b"\x00", b"\xe0"): A_ : Optional[int] = ch + msvcrt.getch() # Translate actual Win chars to bullet char types try: A_ : Optional[int] = chr(WIN_KEYMAP[cha]) WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""])) WIN_CH_BUFFER.append(lowerCamelCase) if ord(lowerCamelCase) in ( KEYMAP["insert"] - 1 << 9, KEYMAP["delete"] - 1 << 9, KEYMAP["pg_up"] - 1 << 9, KEYMAP["pg_down"] - 1 << 9, ): WIN_CH_BUFFER.append(chr(126)) A_ : List[Any] = chr(KEYMAP["""esc"""]) except KeyError: A_ : str = cha[1] else: A_ : List[Any] = ch.decode(lowerCamelCase) else: A_ : List[Any] = WIN_CH_BUFFER.pop(0) elif os.name == "posix": import termios import tty A_ : Optional[int] = sys.stdin.fileno() A_ : Union[str, Any] = termios.tcgetattr(lowerCamelCase) try: tty.setraw(lowerCamelCase) A_ : Optional[Any] = sys.stdin.read(1) finally: termios.tcsetattr(lowerCamelCase , termios.TCSADRAIN , lowerCamelCase) return ch def lowerCamelCase ( ): A_ : Union[str, Any] = get_raw_chars() if ord(lowerCamelCase) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(lowerCamelCase) == KEYMAP["esc"]: A_ : str = get_raw_chars() if ord(lowerCamelCase) == KEYMAP["mod_int"]: A_ : List[str] = get_raw_chars() if ord(lowerCamelCase) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(lowerCamelCase) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(lowerCamelCase) + ARROW_KEY_FLAG) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
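A short sketch of how these helpers can drive an arrow-key menu. The second reader function is obfuscated above; it matches accelerate's get_character (it wraps get_raw_chars), so that name is an assumption here.

def select_option(options):
    # Cycle a cursor with the up/down arrows; confirm with Enter.
    index = 0
    while True:
        print(f"-> {options[index]}   ", end="\r", flush=True)
        char = get_character()  # assumed name of the second helper above
        if not isinstance(char, str):  # KEYMAP["undefined"] comes back as an int
            continue
        if ord(char) == KEYMAP["up"]:
            index = (index - 1) % len(options)
        elif ord(char) == KEYMAP["down"]:
            index = (index + 1) % len(options)
        elif ord(char) == KEYMAP["newline"]:
            return options[index]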
'''simple docstring''' import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate __magic_name__ = trt.Logger(trt.Logger.WARNING) __magic_name__ = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) __magic_name__ = logging.getLogger(__name__) __magic_name__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--onnx_model_path', default=None, type=str, required=True, help='Path to ONNX model: ', ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='The output directory where the model checkpoints and predictions will be written.', ) # Other parameters parser.add_argument( '--tokenizer_name', default='', type=str, required=True, help='Pretrained tokenizer name or path if not the same as model_name', ) parser.add_argument( '--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.', ) parser.add_argument( '--null_score_diff_threshold', type=float, default=0.0, help='If null_score - best_non_null is greater than the threshold predict null.', ) parser.add_argument( '--max_seq_length', default=384, type=int, help=( 'The maximum total input sequence length after WordPiece tokenization. Sequences ' 'longer than this will be truncated, and sequences shorter than this will be padded.' ), ) parser.add_argument( '--doc_stride', default=128, type=int, help='When splitting up a long document into chunks, how much stride to take between chunks.', ) parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.') parser.add_argument( '--n_best_size', default=20, type=int, help='The total number of n-best predictions to generate in the nbest_predictions.json output file.', ) parser.add_argument( '--max_answer_length', default=30, type=int, help=( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' ), ) parser.add_argument('--seed', type=int, default=42, help='random seed for initialization') parser.add_argument( '--dataset_name', type=str, default=None, required=True, help='The name of the dataset to use (via the datasets library).', ) parser.add_argument( '--dataset_config_name', type=str, default=None, help='The configuration name of the dataset to use (via the datasets library).', ) parser.add_argument( '--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.' 
) parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets') parser.add_argument( '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision instead of 32-bit', ) parser.add_argument( '--int8', action='store_true', help='Whether to use INT8', ) __magic_name__ = parser.parse_args() if args.tokenizer_name: __magic_name__ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( 'You are instantiating a new tokenizer from scratch. This is not supported by this script.' 'You can do it from another script, save it, and load it from here, using --tokenizer_name.' ) logger.info('Training/evaluation parameters %s', args) __magic_name__ = args.per_device_eval_batch_size __magic_name__ = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties __magic_name__ = True __magic_name__ = 'temp_engine/bert-fp32.engine' if args.fpaa: __magic_name__ = 'temp_engine/bert-fp16.engine' if args.inta: __magic_name__ = 'temp_engine/bert-int8.engine' # import ONNX file if not os.path.exists('temp_engine'): os.makedirs('temp_engine') __magic_name__ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, 'rb') as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network __magic_name__ = [network.get_input(i) for i in range(network.num_inputs)] __magic_name__ = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: __magic_name__ = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) __magic_name__ = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) __magic_name__ = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, 'wb') as f: f.write(engine.serialize()) def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : str , lowerCamelCase : List[str]): A_ : str = np.asarray(inputs["""input_ids"""] , dtype=np.intaa) A_ : int = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa) A_ : Optional[int] = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCamelCase) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCamelCase) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCamelCase) # start time A_ : List[Any] = time.time() # Run inference context.execute_async( bindings=[int(lowerCamelCase) for d_inp in d_inputs] + [int(lowerCamelCase), int(lowerCamelCase)] , stream_handle=stream.handle) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase) cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase) # Synchronize the stream and take time stream.synchronize() # end time A_ : str = time.time() A_ : 
Tuple = end_time - start_time A_ : Any = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. __magic_name__ = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. __magic_name__ = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError('Evaluation requires a dataset name') # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. __magic_name__ = raw_datasets['validation'].column_names __magic_name__ = 'question' if 'question' in column_names else column_names[0] __magic_name__ = 'context' if 'context' in column_names else column_names[1] __magic_name__ = 'answers' if 'answers' in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). __magic_name__ = tokenizer.padding_side == 'right' if args.max_seq_length > tokenizer.model_max_length: logger.warning( f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the""" f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) __magic_name__ = min(args.max_seq_length, tokenizer.model_max_length) def lowerCamelCase ( lowerCamelCase : Dict): # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace A_ : List[Any] = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. 
A_ : Optional[int] = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowerCamelCase , stride=args.doc_stride , return_overflowing_tokens=lowerCamelCase , return_offsets_mapping=lowerCamelCase , padding="""max_length""" , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. A_ : List[str] = tokenized_examples.pop("""overflow_to_sample_mapping""") # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. A_ : Union[str, Any] = [] for i in range(len(tokenized_examples["""input_ids"""])): # Grab the sequence corresponding to that example (to know what is the context and what is the question). A_ : Any = tokenized_examples.sequence_ids(lowerCamelCase) A_ : Tuple = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. A_ : Union[str, Any] = sample_mapping[i] tokenized_examples["example_id"].append(examples["""id"""][sample_index]) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. A_ : Dict = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["""offset_mapping"""][i]) ] return tokenized_examples __magic_name__ = raw_datasets['validation'] # Validation Feature Creation __magic_name__ = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc='Running tokenizer on validation dataset', ) __magic_name__ = default_data_collator __magic_name__ = eval_dataset.remove_columns(['example_id', 'offset_mapping']) __magic_name__ = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any]="eval"): # Post-processing: we match the start logits and end logits to answers in the original context. A_ : Tuple = postprocess_qa_predictions( examples=lowerCamelCase , features=lowerCamelCase , predictions=lowerCamelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowerCamelCase , ) # Format the result to the format the metric expects. if args.version_2_with_negative: A_ : Dict = [ {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items() ] else: A_ : Union[str, Any] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()] A_ : Any = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=lowerCamelCase , label_ids=lowerCamelCase) __magic_name__ = load_metric('squad_v2' if args.version_2_with_negative else 'squad') # Evaluation! 
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path) with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def lowerCamelCase ( lowerCamelCase : Union[str, Any]): return trt.volume(engine.get_binding_shape(lowerCamelCase)) * engine.get_binding_dtype(lowerCamelCase).itemsize # Allocate device memory for inputs and outputs. __magic_name__ = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer __magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) __magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) __magic_name__ = cuda.mem_alloc(h_outputa.nbytes) __magic_name__ = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. __magic_name__ = cuda.Stream() # Evaluation logger.info('***** Running Evaluation *****') logger.info(f""" Num examples = {len(eval_dataset)}""") logger.info(f""" Batch size = {args.per_device_eval_batch_size}""") __magic_name__ = 0.0 __magic_name__ = 0 __magic_name__ = timeit.default_timer() __magic_name__ = None for step, batch in enumerate(eval_dataloader): __magic_name__ , __magic_name__ = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 __magic_name__ , __magic_name__ = outputs __magic_name__ = torch.tensor(start_logits) __magic_name__ = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered __magic_name__ = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) __magic_name__ = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) __magic_name__ = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) __magic_name__ = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if all_preds is not None: __magic_name__ = nested_truncate(all_preds, len(eval_dataset)) __magic_name__ = timeit.default_timer() - start_time logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter)) logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000)) logger.info('Total Number of Inference = %d', niter) __magic_name__ = post_processing_function(eval_examples, eval_dataset, all_preds) __magic_name__ = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f"""Evaluation metrics: {eval_metric}""")
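The accumulation pattern above (per-batch logits concatenated across steps, then truncated to the dataset length) can be hard to see through the TensorRT plumbing; here is a minimal numpy-only sketch of what nested_concat/nested_truncate do for this simple tuple case. The shapes are illustrative assumptions.

import numpy as np

def concat_preds(all_preds, batch_preds):
    # nested_concat, specialized to a flat tuple of arrays.
    if all_preds is None:
        return batch_preds
    return tuple(np.concatenate([a, b], axis=0) for a, b in zip(all_preds, batch_preds))

all_preds = None
for _ in range(3):  # pretend: 3 batches of 8 examples, sequence length 384
    batch = (np.zeros((8, 384), np.float32), np.zeros((8, 384), np.float32))
    all_preds = concat_preds(all_preds, batch)

all_preds = tuple(p[:20] for p in all_preds)  # nested_truncate to len(eval_dataset) == 20
assert all_preds[0].shape == (20, 384) and all_preds[1].shape == (20, 384)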
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __magic_name__ = logging.get_logger(__name__) def lowerCamelCase ( lowerCamelCase : Dict): '''simple docstring''' A_ : List[str] = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: A_ : Union[str, Any] = [144, 192, 240] A_ : int = [16, 32, 64, 96, 128, 160, 640] elif "mobilevit_xs" in mobilevit_name: A_ : List[str] = [96, 120, 144] A_ : Any = [16, 32, 48, 64, 80, 96, 384] elif "mobilevit_xxs" in mobilevit_name: A_ : Any = [64, 80, 96] A_ : List[str] = [16, 16, 24, 48, 64, 80, 320] A_ : Any = 0.05 A_ : List[Any] = 2.0 if mobilevit_name.startswith("""deeplabv3_"""): A_ : int = 512 A_ : Optional[int] = 16 A_ : List[Any] = 21 A_ : List[str] = """pascal-voc-id2label.json""" else: A_ : str = 1000 A_ : Any = """imagenet-1k-id2label.json""" A_ : Any = """huggingface/label-files""" A_ : List[str] = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""") , """r""")) A_ : str = {int(lowerCamelCase): v for k, v in idalabel.items()} A_ : Any = idalabel A_ : List[str] = {v: k for k, v in idalabel.items()} return config def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int=False): '''simple docstring''' for i in range(1 , 6): if F'layer_{i}.' in name: A_ : Tuple = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.') if "conv_1." in name: A_ : Union[str, Any] = name.replace("""conv_1.""" , """conv_stem.""") if ".block." in name: A_ : Optional[Any] = name.replace(""".block.""" , """.""") if "exp_1x1" in name: A_ : Union[str, Any] = name.replace("""exp_1x1""" , """expand_1x1""") if "red_1x1" in name: A_ : int = name.replace("""red_1x1""" , """reduce_1x1""") if ".local_rep.conv_3x3." in name: A_ : List[str] = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""") if ".local_rep.conv_1x1." in name: A_ : Optional[int] = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""") if ".norm." in name: A_ : Tuple = name.replace(""".norm.""" , """.normalization.""") if ".conv." in name: A_ : List[Any] = name.replace(""".conv.""" , """.convolution.""") if ".conv_proj." in name: A_ : str = name.replace(""".conv_proj.""" , """.conv_projection.""") for i in range(0 , 2): for j in range(0 , 4): if F'.{i}.{j}.' in name: A_ : Tuple = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.') for i in range(2 , 6): for j in range(0 , 4): if F'.{i}.{j}.' in name: A_ : Dict = name.replace(F'.{i}.{j}.' , F'.{i}.') if "expand_1x1" in name: A_ : Union[str, Any] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""") if "conv_3x3" in name: A_ : str = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""") if "reduce_1x1" in name: A_ : Union[str, Any] = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""") for i in range(2 , 5): if F'.global_rep.{i}.weight' in name: A_ : List[Any] = name.replace(F'.global_rep.{i}.weight' , """.layernorm.weight""") if F'.global_rep.{i}.bias' in name: A_ : Optional[int] = name.replace(F'.global_rep.{i}.bias' , """.layernorm.bias""") if ".global_rep." in name: A_ : Optional[Any] = name.replace(""".global_rep.""" , """.transformer.""") if ".pre_norm_mha.0." 
in name: A_ : int = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""") if ".pre_norm_mha.1.out_proj." in name: A_ : Dict = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""") if ".pre_norm_ffn.0." in name: A_ : Dict = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""") if ".pre_norm_ffn.1." in name: A_ : Any = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""") if ".pre_norm_ffn.4." in name: A_ : Union[str, Any] = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""") if ".transformer." in name: A_ : Any = name.replace(""".transformer.""" , """.transformer.layer.""") if ".aspp_layer." in name: A_ : int = name.replace(""".aspp_layer.""" , """.""") if ".aspp_pool." in name: A_ : Tuple = name.replace(""".aspp_pool.""" , """.""") if "seg_head." in name: A_ : Optional[int] = name.replace("""seg_head.""" , """segmentation_head.""") if "segmentation_head.classifier.classifier." in name: A_ : List[str] = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""") if "classifier.fc." in name: A_ : str = name.replace("""classifier.fc.""" , """classifier.""") elif (not base_model) and ("segmentation_head." not in name): A_ : str = """mobilevit.""" + name return name def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int]=False): '''simple docstring''' if base_model: A_ : Dict = """""" else: A_ : Any = """mobilevit.""" for key in orig_state_dict.copy().keys(): A_ : List[Any] = orig_state_dict.pop(lowerCamelCase) if key[:8] == "encoder.": A_ : int = key[8:] if "qkv" in key: A_ : Any = key.split(""".""") A_ : str = int(key_split[0][6:]) - 1 A_ : int = int(key_split[3]) A_ : Optional[Any] = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}') A_ : Tuple = layer.transformer.layer[transformer_num].attention.attention.all_head_size A_ : Optional[Any] = ( F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.' 
) if "weight" in key: A_ : Dict = val[:dim, :] A_ : Optional[int] = val[dim : dim * 2, :] A_ : List[Any] = val[-dim:, :] else: A_ : Optional[Any] = val[:dim] A_ : List[Any] = val[dim : dim * 2] A_ : Any = val[-dim:] else: A_ : List[str] = val return orig_state_dict def lowerCamelCase ( ): '''simple docstring''' A_ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" A_ : Dict = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase).raw) return im @torch.no_grad() def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int=False): '''simple docstring''' A_ : Optional[Any] = get_mobilevit_config(lowerCamelCase) # load original state_dict A_ : List[Any] = torch.load(lowerCamelCase , map_location="""cpu""") # load 🤗 model if mobilevit_name.startswith("""deeplabv3_"""): A_ : List[str] = MobileViTForSemanticSegmentation(lowerCamelCase).eval() else: A_ : str = MobileViTForImageClassification(lowerCamelCase).eval() A_ : str = convert_state_dict(lowerCamelCase , lowerCamelCase) model.load_state_dict(lowerCamelCase) # Check outputs on an image, prepared by MobileViTImageProcessor A_ : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32) A_ : Any = image_processor(images=prepare_img() , return_tensors="""pt""") A_ : List[Any] = model(**lowerCamelCase) A_ : Dict = outputs.logits if mobilevit_name.startswith("""deeplabv3_"""): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": A_ : int = torch.tensor( [ [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]], [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]], [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]], ]) elif mobilevit_name == "deeplabv3_mobilevit_xs": A_ : Tuple = torch.tensor( [ [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]], [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]], [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]], ]) elif mobilevit_name == "deeplabv3_mobilevit_xxs": A_ : Tuple = torch.tensor( [ [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]], [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]], [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]], ]) else: raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}') assert torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-4) else: assert logits.shape == (1, 1000) if mobilevit_name == "mobilevit_s": A_ : Tuple = torch.tensor([-0.9866, 0.2392, -1.1241]) elif mobilevit_name == "mobilevit_xs": A_ : Any = torch.tensor([-2.4761, -0.9399, -1.9587]) elif mobilevit_name == "mobilevit_xxs": A_ : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653]) else: raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}') assert torch.allclose(logits[0, :3] , lowerCamelCase , atol=1E-4) Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase) print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}') model.save_pretrained(lowerCamelCase) print(F'Saving image processor to {pytorch_dump_folder_path}') image_processor.save_pretrained(lowerCamelCase) if push_to_hub: A_ : str = { """mobilevit_s""": """mobilevit-small""", """mobilevit_xs""": """mobilevit-x-small""", """mobilevit_xxs""": 
"""mobilevit-xx-small""", """deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""", """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""", """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""", } print("""Pushing to the hub...""") A_ : Union[str, Any] = model_mapping[mobilevit_name] image_processor.push_to_hub(lowerCamelCase , organization="""apple""") model.push_to_hub(lowerCamelCase , organization="""apple""") if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--mobilevit_name', default='mobilevit_s', type=str, help=( 'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\',' ' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.' ), ) parser.add_argument( '--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).' ) parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __magic_name__ = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


__magic_name__ = {
    'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __magic_name__ = ['ConvNextFeatureExtractor']
    __magic_name__ = ['ConvNextImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __magic_name__ = [
        'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ConvNextForImageClassification',
        'ConvNextModel',
        'ConvNextPreTrainedModel',
        'ConvNextBackbone',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __magic_name__ = [
        'TFConvNextForImageClassification',
        'TFConvNextModel',
        'TFConvNextPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    __magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
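The file above registers a lazy module so that heavy torch/TF submodules are only imported when one of their symbols is first accessed. A simplified standalone analogue of that mechanism (not transformers' actual _LazyModule) looks like this:

import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Invert {submodule: [symbols]} into {symbol: submodule} for lookup.
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol):
        # Called only when `symbol` is not already an attribute: import on demand.
        if symbol not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {symbol}")
        submodule = importlib.import_module("." + self._symbol_to_module[symbol], self.__name__)
        return getattr(submodule, symbol)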
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__magic_name__ = {
    'configuration_bigbird_pegasus': [
        'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'BigBirdPegasusConfig',
        'BigBirdPegasusOnnxConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __magic_name__ = [
        'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BigBirdPegasusForCausalLM',
        'BigBirdPegasusForConditionalGeneration',
        'BigBirdPegasusForQuestionAnswering',
        'BigBirdPegasusForSequenceClassification',
        'BigBirdPegasusModel',
        'BigBirdPegasusPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )

else:
    import sys

    __magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
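A usage sketch of the effect of this registration (assuming transformers and torch are installed): importing the package stays cheap, and the torch-backed class is only materialized on first attribute access.

import transformers

config = transformers.BigBirdPegasusConfig()  # pulls in the configuration module only
model_cls = transformers.BigBirdPegasusForConditionalGeneration  # triggers the modeling import
print(config.model_type, model_cls.__name__)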
'''simple docstring''' import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = { 'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json', 'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json', 'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json', } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """owlvit_text_model""" def __init__( self : Union[str, Any] ,_a : Any=49408 ,_a : Any=512 ,_a : Tuple=2048 ,_a : Dict=12 ,_a : Optional[int]=8 ,_a : Tuple=16 ,_a : Tuple="quick_gelu" ,_a : Optional[Any]=1e-5 ,_a : List[Any]=0.0 ,_a : Optional[int]=0.02 ,_a : Dict=1.0 ,_a : Dict=0 ,_a : Any=49406 ,_a : Tuple=49407 ,**_a : List[Any] ,): '''simple docstring''' super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a ) A_ : Tuple = vocab_size A_ : int = hidden_size A_ : Optional[int] = intermediate_size A_ : Optional[int] = num_hidden_layers A_ : Union[str, Any] = num_attention_heads A_ : int = max_position_embeddings A_ : str = hidden_act A_ : Union[str, Any] = layer_norm_eps A_ : Tuple = attention_dropout A_ : Union[str, Any] = initializer_range A_ : List[Any] = initializer_factor @classmethod def _a ( cls : List[str] ,_a : Union[str, os.PathLike] ,**_a : str ): '''simple docstring''' cls._set_token_in_kwargs(_a ) A_ , A_ : int = cls.get_config_dict(_a ,**_a ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get("""model_type""" ) == "owlvit": A_ : Union[str, Any] = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(_a ,**_a ) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """owlvit_vision_model""" def __init__( self : List[Any] ,_a : Optional[Any]=768 ,_a : Tuple=3072 ,_a : Dict=12 ,_a : int=12 ,_a : Dict=3 ,_a : Tuple=768 ,_a : int=32 ,_a : int="quick_gelu" ,_a : List[Any]=1e-5 ,_a : Tuple=0.0 ,_a : List[Any]=0.02 ,_a : str=1.0 ,**_a : int ,): '''simple docstring''' super().__init__(**_a ) A_ : List[str] = hidden_size A_ : Union[str, Any] = intermediate_size A_ : Union[str, Any] = num_hidden_layers A_ : Optional[Any] = num_attention_heads A_ : int = num_channels A_ : str = image_size A_ : List[Any] = patch_size A_ : int = hidden_act A_ : List[Any] = layer_norm_eps A_ : List[str] = attention_dropout A_ : str = initializer_range A_ : str = initializer_factor @classmethod def _a ( cls : List[Any] ,_a : Union[str, os.PathLike] ,**_a : str ): '''simple docstring''' cls._set_token_in_kwargs(_a ) A_ , A_ : Optional[int] = cls.get_config_dict(_a ,**_a ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get("""model_type""" ) == "owlvit": A_ : List[str] = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(_a ,**_a ) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """owlvit""" a_ = True def __init__( self : Union[str, Any] ,_a : List[str]=None ,_a : List[str]=None ,_a : Dict=512 ,_a : List[Any]=2.6592 ,_a : Optional[Any]=True ,**_a : Optional[int] ,): '''simple docstring''' super().__init__(**_a ) if text_config is None: A_ : List[Any] = {} logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" ) if vision_config is None: A_ : Tuple = {} logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" ) A_ : Dict = OwlViTTextConfig(**_a ) A_ : Dict = OwlViTVisionConfig(**_a ) A_ : Any = projection_dim A_ : Optional[int] = logit_scale_init_value A_ : Optional[int] = return_dict A_ : Dict = 1.0 @classmethod def _a ( cls : Union[str, Any] ,_a : Union[str, os.PathLike] ,**_a : Optional[int] ): '''simple docstring''' cls._set_token_in_kwargs(_a ) A_ , A_ : List[Any] = cls.get_config_dict(_a ,**_a ) if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(_a ,**_a ) @classmethod def _a ( cls : int ,_a : Dict ,_a : Dict ,**_a : List[str] ): '''simple docstring''' A_ : str = {} A_ : int = text_config A_ : Union[str, Any] = vision_config return cls.from_dict(_a ,**_a ) def _a ( self : Optional[int] ): '''simple docstring''' A_ : Dict = copy.deepcopy(self.__dict__ ) A_ : str = self.text_config.to_dict() A_ : Optional[int] = self.vision_config.to_dict() A_ : List[Any] = self.__class__.model_type return output class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' @property def _a ( self : int ): '''simple docstring''' return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ] ) @property def _a ( self : str ): '''simple docstring''' return OrderedDict( [ ("""logits_per_image""", {0: """batch"""}), ("""logits_per_text""", {0: """batch"""}), ("""text_embeds""", {0: """batch"""}), ("""image_embeds""", {0: """batch"""}), ] ) @property def _a ( self : Optional[Any] ): '''simple docstring''' return 1e-4 def _a ( self : int ,_a : "ProcessorMixin" ,_a : int = -1 ,_a : int = -1 ,_a : Optional["TensorType"] = None ,): '''simple docstring''' A_ : Any = super().generate_dummy_inputs( processor.tokenizer ,batch_size=_a ,seq_length=_a ,framework=_a ) A_ : Any = super().generate_dummy_inputs( processor.image_processor ,batch_size=_a ,framework=_a ) return {**text_input_dict, **image_input_dict} @property def _a ( self : Optional[Any] ): '''simple docstring''' return 14
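A usage sketch for the configs above. The composing classmethod is obfuscated in this dump; in the public transformers API it is OwlViTConfig.from_text_vision_configs, which is assumed here.

from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig

text_config = OwlViTTextConfig(vocab_size=49408, hidden_size=512)
vision_config = OwlViTVisionConfig(hidden_size=768, patch_size=32)
config = OwlViTConfig.from_text_vision_configs(text_config, vision_config)
assert config.projection_dim == 512  # the default set in __init__ above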
'''simple docstring''' import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self : Union[str, Any] ,_a : int ,_a : int=7 ,_a : str=3 ,_a : Dict=18 ,_a : Optional[int]=30 ,_a : int=400 ,_a : List[Any]=True ,_a : List[str]=None ,_a : Any=True ,_a : Any=[0.5, 0.5, 0.5] ,_a : Tuple=[0.5, 0.5, 0.5] ,): '''simple docstring''' A_ : Union[str, Any] = size if size is not None else {"""height""": 18, """width""": 18} A_ : Tuple = parent A_ : Any = batch_size A_ : Tuple = num_channels A_ : int = image_size A_ : Union[str, Any] = min_resolution A_ : List[str] = max_resolution A_ : str = do_resize A_ : Dict = size A_ : str = do_normalize A_ : Union[str, Any] = image_mean A_ : str = image_std def _a ( self : Union[str, Any] ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = DPTImageProcessor if is_vision_available() else None def _a ( self : List[Any] ): '''simple docstring''' A_ : Tuple = DPTImageProcessingTester(self ) @property def _a ( self : Dict ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _a ( self : int ): '''simple docstring''' A_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a ,"""image_mean""" ) ) self.assertTrue(hasattr(_a ,"""image_std""" ) ) self.assertTrue(hasattr(_a ,"""do_normalize""" ) ) self.assertTrue(hasattr(_a ,"""do_resize""" ) ) self.assertTrue(hasattr(_a ,"""size""" ) ) def _a ( self : Any ): '''simple docstring''' A_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""height""": 18, """width""": 18} ) A_ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ) self.assertEqual(image_processor.size ,{"""height""": 42, """width""": 42} ) def _a ( self : Optional[int] ): '''simple docstring''' A_ : int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a ,Image.Image ) # Test not batched input A_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched A_ : Union[str, Any] = image_processing(_a ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) def _a ( self : Tuple ): '''simple docstring''' A_ : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy 
tensors A_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_a ,numpify=_a ) for image in image_inputs: self.assertIsInstance(_a ,np.ndarray ) # Test not batched input A_ : List[Any] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched A_ : Optional[Any] = image_processing(_a ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A_ : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_a ,torchify=_a ) for image in image_inputs: self.assertIsInstance(_a ,torch.Tensor ) # Test not batched input A_ : List[str] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched A_ : Union[str, Any] = image_processing(_a ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,)
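What these tests exercise, in a few lines of user code: the processor resizes and normalizes arbitrary images into fixed-size pixel_values tensors. A sketch, with the 18x18 size matching the tester's default above:

import numpy as np
from PIL import Image
from transformers import DPTImageProcessor

processor = DPTImageProcessor(size={"height": 18, "width": 18})
image = Image.fromarray((np.random.rand(30, 40, 3) * 255).astype(np.uint8))
pixel_values = processor(images=image, return_tensors="pt").pixel_values
assert tuple(pixel_values.shape) == (1, 3, 18, 18)  # batch, channels, height, width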
'''simple docstring''' import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __magic_name__ = logging.get_logger(__name__) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""input_features""", """is_longer"""] def __init__( self : Dict ,_a : Optional[int]=64 ,_a : List[Any]=48000 ,_a : str=480 ,_a : Optional[Any]=10 ,_a : Optional[int]=1024 ,_a : Tuple=0.0 ,_a : str=False ,_a : float = 0 ,_a : float = 14000 ,_a : int = None ,_a : str = "fusion" ,_a : str = "repeatpad" ,**_a : Tuple ,): '''simple docstring''' super().__init__( feature_size=_a ,sampling_rate=_a ,padding_value=_a ,return_attention_mask=_a ,**_a ,) A_ : Tuple = top_db A_ : Tuple = truncation A_ : Optional[Any] = padding A_ : Optional[int] = fft_window_size A_ : Dict = (fft_window_size >> 1) + 1 A_ : Any = hop_length A_ : List[Any] = max_length_s A_ : Tuple = max_length_s * sampling_rate A_ : Tuple = sampling_rate A_ : Optional[int] = frequency_min A_ : Tuple = frequency_max A_ : Tuple = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm=_a ,mel_scale="""htk""" ,) A_ : Dict = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm="""slaney""" ,mel_scale="""slaney""" ,) def _a ( self : int ): '''simple docstring''' A_ : int = copy.deepcopy(self.__dict__ ) A_ : Tuple = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def _a ( self : Dict ,_a : np.array ,_a : Optional[np.array] = None ): '''simple docstring''' A_ : List[str] = spectrogram( _a ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=_a ,log_mel="""dB""" ,) return log_mel_spectrogram.T def _a ( self : Optional[int] ,_a : Dict ,_a : Optional[Any] ,_a : Optional[int] ): '''simple docstring''' A_ : Dict = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk A_ : List[Any] = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk A_ : int = [0] # randomly choose index for each part A_ : List[str] = np.random.choice(ranges[0] ) A_ : int = np.random.choice(ranges[1] ) A_ : Optional[int] = np.random.choice(ranges[2] ) A_ : Tuple = mel[idx_front : idx_front + chunk_frames, :] A_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :] A_ : Dict = mel[idx_back : idx_back + chunk_frames, :] A_ : Optional[int] = torch.tensor(mel[None, None, :] ) A_ : Dict = torch.nn.functional.interpolate( _a ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=_a ) A_ : str = mel_shrink[0][0].numpy() A_ : Tuple = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 ) return mel_fusion def _a ( self : Dict ,_a : np.array ,_a : Optional[Any] ,_a : int ,_a : Dict ): '''simple docstring''' if waveform.shape[0] > max_length: if truncation == "rand_trunc": A_ : Dict = True # random crop to max_length (for compatibility) -> this should be handled by self.pad A_ : Tuple = 
len(_a ) - max_length A_ : Optional[int] = np.random.randint(0 ,overflow + 1 ) A_ : List[Any] = waveform[idx : idx + max_length] A_ : Optional[Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :] elif truncation == "fusion": A_ : Dict = self._np_extract_fbank_features(_a ,self.mel_filters ) A_ : Tuple = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed A_ : Optional[int] = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. A_ : Optional[int] = np.stack([mel, mel, mel, mel] ,axis=0 ) A_ : str = False else: A_ : str = self._random_mel_fusion(_a ,_a ,_a ) A_ : Optional[Any] = True else: raise NotImplementedError(f'data_truncating {truncation} not implemented' ) else: A_ : Optional[int] = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": A_ : int = int(max_length / len(_a ) ) A_ : Any = np.stack(np.tile(_a ,n_repeat + 1 ) )[:max_length] if padding == "repeatpad": A_ : List[str] = int(max_length / len(_a ) ) A_ : Optional[Any] = np.stack(np.tile(_a ,_a ) ) A_ : Any = np.pad(_a ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 ) if truncation == "fusion": A_ : List[Any] = self._np_extract_fbank_features(_a ,self.mel_filters ) A_ : Optional[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 ) else: A_ : Union[str, Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : List[Any] ,_a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_a : str = None ,_a : Optional[str] = None ,_a : Optional[int] = None ,_a : Optional[int] = None ,_a : Optional[Union[str, TensorType]] = None ,**_a : Any ,): '''simple docstring''' A_ : List[str] = truncation if truncation is not None else self.truncation A_ : List[Any] = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a' f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input' f' was sampled with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) A_ : Any = isinstance(_a ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) A_ : int = is_batched_numpy or ( isinstance(_a ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: A_ : Optional[int] = [np.asarray(_a ,dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_a ,np.ndarray ): A_ : str = np.asarray(_a ,dtype=np.floataa ) elif isinstance(_a ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A_ : Tuple = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A_ : Any = [np.asarray(_a )] # convert to mel spectrogram, truncate and pad if needed. 
A_ : str = [ self._get_input_mel(_a ,max_length if max_length else self.nb_max_samples ,_a ,_a ) for waveform in raw_speech ] A_ : int = [] A_ : Any = [] for mel, longer in padded_inputs: input_mel.append(_a ) is_longer.append(_a ) if truncation == "fusion" and sum(_a ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer A_ : List[Any] = np.random.randint(0 ,len(_a ) ) A_ : List[str] = True if isinstance(input_mel[0] ,_a ): A_ : Tuple = [np.asarray(_a ,dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool A_ : List[str] = [[longer] for longer in is_longer] A_ : Optional[Any] = {"""input_features""": input_mel, """is_longer""": is_longer} A_ : int = BatchFeature(_a ) if return_tensors is not None: A_ : int = input_features.convert_to_tensors(_a ) return input_features
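A sketch of calling this feature extractor on ten seconds of mono audio. ClapFeatureExtractor is the public transformers name for the obfuscated class above, so the import is an assumption; the output keys follow the class's input_features/is_longer contract.

import numpy as np
from transformers import ClapFeatureExtractor

extractor = ClapFeatureExtractor()  # defaults above: 48 kHz, 10 s max length, 64 mel bins
audio = np.random.randn(48_000 * 10).astype(np.float32)  # exactly 10 s of noise
features = extractor(audio, sampling_rate=48_000, return_tensors="pt")
print(features["input_features"].shape, features["is_longer"])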
'''simple docstring'''
import json
import os
from collections import Counter

import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset


POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048


class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs


def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor


def get_mmimdb_labels():
    return [
        "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short",
        "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror",
        "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir",
    ]


def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
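A minimal, editor-added sketch of how the pieces above are typically wired together. The args object, jsonl path, and tokenizer checkpoint are assumptions for illustration only:

# Editor's sketch (hypothetical args/paths; "bert-base-uncased" is just an assumed tokenizer).
from argparse import Namespace

from torch.utils.data import DataLoader
from transformers import AutoTokenizer

args = Namespace(num_image_embeds=3)
encoder = ImageEncoder(args)  # POOLING_BREAKDOWN[3] = (3, 1), so features pool to Bx3x2048
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
dataset = JsonlDataset("train.jsonl", tokenizer, get_image_transforms(), get_mmimdb_labels(), max_seq_length=512)
loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)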
720
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class __lowerCAmelCase : '''simple docstring''' def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[Any]=7 ,_a : Optional[Any]=True ,_a : Any=True ,_a : Optional[int]=True ,_a : Union[str, Any]=99 ,_a : Union[str, Any]=32 ,_a : List[str]=5 ,_a : List[str]=4 ,_a : Dict=37 ,_a : List[Any]="gelu" ,_a : int=0.1 ,_a : Optional[int]=0.1 ,_a : Tuple=512 ,_a : Union[str, Any]=16 ,_a : Optional[Any]=2 ,_a : Optional[Any]=0.02 ,_a : Optional[int]=3 ,_a : str=4 ,_a : Optional[Any]=None ,): '''simple docstring''' A_ : Optional[Any] = parent A_ : str = batch_size A_ : int = seq_length A_ : Union[str, Any] = is_training A_ : Optional[Any] = use_token_type_ids A_ : int = use_labels A_ : Dict = vocab_size A_ : List[Any] = hidden_size A_ : Tuple = num_hidden_layers A_ : Optional[int] = num_attention_heads A_ : int = intermediate_size A_ : Tuple = hidden_act A_ : int = hidden_dropout_prob A_ : Dict = attention_probs_dropout_prob A_ : Any = max_position_embeddings A_ : Optional[Any] = type_vocab_size A_ : Tuple = type_sequence_label_size A_ : int = initializer_range A_ : Optional[Any] = num_labels A_ : str = num_choices A_ : Optional[Any] = scope A_ : List[Any] = self.vocab_size - 1 def _a ( self : Any ): '''simple docstring''' A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A_ : List[Any] = None if self.use_token_type_ids: A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) A_ : int = None A_ : str = None A_ : Union[str, Any] = None if self.use_labels: A_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A_ : Any = ids_tensor([self.batch_size] ,self.num_choices ) A_ : List[Any] = OpenAIGPTConfig( vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,) A_ : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def _a ( self : Optional[int] ,_a : List[str] ,_a : str ,_a : int ,_a : int ,*_a : Union[str, Any] ): '''simple docstring''' A_ : Optional[Any] = OpenAIGPTModel(config=_a ) model.to(_a ) model.eval() A_ : Optional[int] = model(_a ,token_type_ids=_a ,head_mask=_a ) A_ : str = model(_a ,token_type_ids=_a ) A_ : Dict = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : Dict ,_a : Optional[int] ,_a : Union[str, Any] ,_a : Dict ,_a : List[str] ,*_a : str ): '''simple docstring''' A_ : str = OpenAIGPTLMHeadModel(_a ) model.to(_a ) model.eval() A_ : Any = model(_a ,token_type_ids=_a ,labels=_a ) self.parent.assertEqual(result.loss.shape ,() ) 
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : Any ,_a : Dict ,_a : List[Any] ,_a : Dict ,_a : Union[str, Any] ,*_a : str ): '''simple docstring''' A_ : Any = OpenAIGPTDoubleHeadsModel(_a ) model.to(_a ) model.eval() A_ : Optional[int] = model(_a ,token_type_ids=_a ,labels=_a ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : List[str] ,_a : str ,_a : Tuple ,_a : Dict ,_a : Tuple ,*_a : Dict ): '''simple docstring''' A_ : List[str] = self.num_labels A_ : int = OpenAIGPTForSequenceClassification(_a ) model.to(_a ) model.eval() A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A_ : Optional[Any] = model(_a ,token_type_ids=_a ,labels=_a ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _a ( self : Tuple ): '''simple docstring''' A_ : Union[str, Any] = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : str = config_and_inputs A_ : int = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask, } return config, inputs_dict @require_torch class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) a_ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly a_ = ( { """feature-extraction""": OpenAIGPTModel, """text-classification""": OpenAIGPTForSequenceClassification, """text-generation""": OpenAIGPTLMHeadModel, """zero-shot""": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def _a ( self : Tuple ,_a : Optional[int] ,_a : str ,_a : List[str] ,_a : List[str] ,_a : Any ): '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def _a ( self : Optional[int] ,_a : str ,_a : Dict ,_a : Optional[int]=False ): '''simple docstring''' A_ : Any = super()._prepare_for_class(_a ,_a ,return_labels=_a ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": A_ : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=_a ,) A_ : Any = inputs_dict["""labels"""] A_ : Any = inputs_dict["""labels"""] A_ : Tuple = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=_a ,) A_ : int = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=_a ) return inputs_dict def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : Tuple = OpenAIGPTModelTester(self ) A_ : Optional[int] = ConfigTester(self ,config_class=_a ,n_embd=37 ) def _a ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def _a ( self : Optional[Any] ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*_a ) def _a ( self : Tuple ): '''simple docstring''' A_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*_a ) def _a ( self : List[Any] ): '''simple docstring''' A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*_a ) def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_a ) @slow def _a ( self : List[Any] ): '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Union[str, Any] = OpenAIGPTModel.from_pretrained(_a ) self.assertIsNotNone(_a ) @require_torch class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def _a ( self : List[str] ): '''simple docstring''' A_ : Dict = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" ) model.to(_a ) A_ : Dict = torch.tensor([[481, 4735, 544]] ,dtype=torch.long ,device=_a ) # the president is A_ : Dict = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the A_ : int = model.generate(_a ,do_sample=_a ) self.assertListEqual(output_ids[0].tolist() ,_a )
27
0
'''simple docstring'''


def lowerCamelCase(input_string: str):
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length - 1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of the previous furthest-ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of the palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find the corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1

        # does this string end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
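An editor-added sanity check for the Manacher routine above, keeping the snippet's own function name:

# Editor's sketch: "abbba" is the only length-5 palindrome in "abbbaba".
assert lowerCamelCase("abbbaba") == "abbba"
assert lowerCamelCase("ababa") == "ababa"  # the whole string is already a palindrome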
721
'''simple docstring'''
import base64


def base85_encode(string: str) -> bytes:
    # encode the UTF-8 string with Ascii85
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    # decode the Ascii85-encoded bytes back to a UTF-8 string
    return base64.a85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
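An editor-added round-trip check for the Ascii85 helpers above:

# Editor's sketch: encoding then decoding recovers the original text.
encoded = base85_encode("Hello World!")
assert base85_decode(encoded) == "Hello World!"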
27
0
"""simple docstring""" import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py A_ = '''.''' # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) A_ = [ '''Assert''', '''AssignVariableOp''', '''EmptyTensorList''', '''MergeV2Checkpoints''', '''ReadVariableOp''', '''ResourceGather''', '''RestoreV2''', '''SaveV2''', '''ShardedFilename''', '''StatefulPartitionedCall''', '''StaticRegexFullMatch''', '''VarHandleOp''', ] def UpperCAmelCase__ (snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : Tuple ): """simple docstring""" _snake_case : Union[str, Any] = SavedModel() _snake_case : Optional[Any] = [] with open(os.path.join(snake_case__ , """utils""" , """tf_ops""" , """onnx.json""" ) ) as f: _snake_case : Any = json.load(snake_case__ )["""opsets"""] for i in range(1 , opset + 1 ): onnx_ops.extend(onnx_opsets[str(snake_case__ )] ) with open(snake_case__ , """rb""" ) as f: saved_model.ParseFromString(f.read() ) _snake_case : List[Any] = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want _snake_case : Tuple = sorted(snake_case__ ) _snake_case : str = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(snake_case__ ) if strict and len(snake_case__ ) > 0: raise Exception(F"Found the following incompatible ops for the opset {opset}:\n" + incompatible_ops ) elif len(snake_case__ ) > 0: print(F"Found the following incompatible ops for the opset {opset}:" ) print(*snake_case__ , sep="""\n""" ) else: print(F"The saved model {saved_model_path} can properly be converted with ONNX." ) if __name__ == "__main__": A_ = argparse.ArgumentParser() parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''') parser.add_argument( '''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.''' ) parser.add_argument( '''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.''' ) parser.add_argument( '''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)''' ) A_ = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
28
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Union[str, Any]=1 ): """simple docstring""" if n_shave_prefix_segments >= 0: return ".".join(path.split(""".""" )[n_shave_prefix_segments:] ) else: return ".".join(path.split(""".""" )[:n_shave_prefix_segments] ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[Any]=0 ): """simple docstring""" _snake_case : Optional[Any] = [] for old_item in old_list: _snake_case : Union[str, Any] = old_item.replace("""in_layers.0""" , """norm1""" ) _snake_case : List[Any] = new_item.replace("""in_layers.2""" , """conv1""" ) _snake_case : Tuple = new_item.replace("""out_layers.0""" , """norm2""" ) _snake_case : Dict = new_item.replace("""out_layers.3""" , """conv2""" ) _snake_case : int = new_item.replace("""emb_layers.1""" , """time_emb_proj""" ) _snake_case : Optional[Any] = new_item.replace("""skip_connection""" , """conv_shortcut""" ) _snake_case : str = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Dict=0 ): """simple docstring""" _snake_case : Dict = [] for old_item in old_list: _snake_case : Dict = old_item _snake_case : int = new_item.replace("""norm.weight""" , """group_norm.weight""" ) _snake_case : str = new_item.replace("""norm.bias""" , """group_norm.bias""" ) _snake_case : Optional[Any] = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" ) _snake_case : Optional[Any] = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" ) _snake_case : Optional[Any] = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : str=None , snake_case__ : str=None , snake_case__ : List[str]=None ): """simple docstring""" assert isinstance(snake_case__ , snake_case__ ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _snake_case : Union[str, Any] = old_checkpoint[path] _snake_case : Optional[int] = old_tensor.shape[0] // 3 _snake_case : List[Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) _snake_case : Union[str, Any] = old_tensor.shape[0] // config["""num_head_channels"""] // 3 _snake_case : Any = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _snake_case , _snake_case , _snake_case : List[str] = old_tensor.split(channels // num_heads , dim=1 ) _snake_case : Union[str, Any] = query.reshape(snake_case__ ) _snake_case : Tuple = key.reshape(snake_case__ ) _snake_case : Any = value.reshape(snake_case__ ) for path in paths: _snake_case : List[Any] = path["""new"""] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _snake_case : Union[str, Any] = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" ) _snake_case : str = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" ) _snake_case : Any = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" ) if additional_replacements is not None: for replacement in additional_replacements: _snake_case : int = new_path.replace(replacement["""old"""] , replacement["""new"""] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _snake_case : Dict = old_checkpoint[path["""old"""]][:, :, 0] else: _snake_case : Optional[Any] = old_checkpoint[path["""old"""]] def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : List[str] ): """simple docstring""" _snake_case : int = {} _snake_case : Tuple = checkpoint["""time_embed.0.weight"""] _snake_case : List[str] = checkpoint["""time_embed.0.bias"""] _snake_case : List[str] = checkpoint["""time_embed.2.weight"""] _snake_case : Tuple = checkpoint["""time_embed.2.bias"""] _snake_case : Dict = checkpoint["""input_blocks.0.0.weight"""] _snake_case : List[Any] = checkpoint["""input_blocks.0.0.bias"""] _snake_case : List[Any] = checkpoint["""out.0.weight"""] _snake_case : Any = checkpoint["""out.0.bias"""] _snake_case : Any = checkpoint["""out.2.weight"""] _snake_case : List[str] = checkpoint["""out.2.bias"""] # Retrieves the keys for the input blocks only _snake_case : List[str] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} ) _snake_case : Any = { layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key] for layer_id in range(snake_case__ ) } # Retrieves the keys for the middle blocks only _snake_case : Optional[int] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} ) _snake_case : Optional[int] = { layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key] for layer_id in range(snake_case__ ) } # Retrieves the keys for the output blocks only _snake_case : str = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} ) _snake_case : List[Any] = { layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key] for layer_id in range(snake_case__ ) } for i in range(1 , snake_case__ ): _snake_case : Union[str, Any] = (i - 1) // (config["""num_res_blocks"""] + 1) _snake_case : int = (i - 1) % (config["""num_res_blocks"""] + 1) _snake_case : List[str] = [key for key in input_blocks[i] if 
F"input_blocks.{i}.0" in key] _snake_case : str = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key] if F"input_blocks.{i}.0.op.weight" in checkpoint: _snake_case : Union[str, Any] = checkpoint[ F"input_blocks.{i}.0.op.weight" ] _snake_case : Dict = checkpoint[ F"input_blocks.{i}.0.op.bias" ] continue _snake_case : Optional[int] = renew_resnet_paths(snake_case__ ) _snake_case : int = {"""old""": F"input_blocks.{i}.0", """new""": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"} _snake_case : Tuple = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""} assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path, resnet_op] , config=snake_case__ ) if len(snake_case__ ): _snake_case : str = renew_attention_paths(snake_case__ ) _snake_case : List[str] = { """old""": F"input_blocks.{i}.1", """new""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}", } _snake_case : Optional[int] = { F"input_blocks.{i}.1.qkv.bias": { """key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", """query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", """value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, F"input_blocks.{i}.1.qkv.weight": { """key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", """query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", """value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case__ , config=snake_case__ , ) _snake_case : int = middle_blocks[0] _snake_case : List[str] = middle_blocks[1] _snake_case : Any = middle_blocks[2] _snake_case : Dict = renew_resnet_paths(snake_case__ ) assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ ) _snake_case : Any = renew_resnet_paths(snake_case__ ) assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ ) _snake_case : Dict = renew_attention_paths(snake_case__ ) _snake_case : Tuple = { """middle_block.1.qkv.bias""": { """key""": """mid_block.attentions.0.key.bias""", """query""": """mid_block.attentions.0.query.bias""", """value""": """mid_block.attentions.0.value.bias""", }, """middle_block.1.qkv.weight""": { """key""": """mid_block.attentions.0.key.weight""", """query""": """mid_block.attentions.0.query.weight""", """value""": """mid_block.attentions.0.value.weight""", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , attention_paths_to_split=snake_case__ , config=snake_case__ ) for i in range(snake_case__ ): _snake_case : Optional[Any] = i // (config["""num_res_blocks"""] + 1) _snake_case : Dict = i % (config["""num_res_blocks"""] + 1) _snake_case : List[str] = [shave_segments(snake_case__ , 2 ) for name in output_blocks[i]] _snake_case : Any = {} for layer in output_block_layers: _snake_case , _snake_case : Any = layer.split(""".""" )[0], shave_segments(snake_case__ , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(snake_case__ ) else: _snake_case : str = [layer_name] if len(snake_case__ ) > 1: _snake_case : Dict = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key] _snake_case : List[str] = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key] _snake_case : List[Any] = renew_resnet_paths(snake_case__ ) _snake_case : int = 
renew_resnet_paths(snake_case__ ) _snake_case : Optional[Any] = {"""old""": F"output_blocks.{i}.0", """new""": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _snake_case : str = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] ) _snake_case : Any = checkpoint[ F"output_blocks.{i}.{index}.conv.weight" ] _snake_case : Optional[int] = checkpoint[ F"output_blocks.{i}.{index}.conv.bias" ] # Clear attentions as they have been attributed above. if len(snake_case__ ) == 2: _snake_case : Any = [] if len(snake_case__ ): _snake_case : str = renew_attention_paths(snake_case__ ) _snake_case : str = { """old""": F"output_blocks.{i}.1", """new""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}", } _snake_case : int = { F"output_blocks.{i}.1.qkv.bias": { """key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", """query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", """value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, F"output_blocks.{i}.1.qkv.weight": { """key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", """query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", """value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=snake_case__ , ) else: _snake_case : Optional[Any] = renew_resnet_paths(snake_case__ , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _snake_case : Optional[Any] = """.""".join(["""output_blocks""", str(snake_case__ ), path["""old"""]] ) _snake_case : Optional[int] = """.""".join(["""up_blocks""", str(snake_case__ ), """resnets""", str(snake_case__ ), path["""new"""]] ) _snake_case : Any = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": A_ = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') A_ = parser.parse_args() A_ = torch.load(args.checkpoint_path) with open(args.config_file) as f: A_ = json.loads(f.read()) A_ = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] A_ = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: A_ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) A_ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) A_ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
28
1
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ) or number < 0: raise ValueError("""Input must be a non-negative integer""" ) _snake_case : Dict = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
28
"""simple docstring""" from typing import Any def UpperCAmelCase__ (snake_case__ : list ): """simple docstring""" if not input_list: return [] _snake_case : List[Any] = [input_list.count(snake_case__ ) for value in input_list] _snake_case : Optional[int] = max(snake_case__ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(snake_case__ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
28
1
"""simple docstring""" import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class lowercase( __a ): '''simple docstring''' lowercase__ = "char" lowercase__ = "bpe" lowercase__ = "wp" A_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class lowercase( __a ): '''simple docstring''' lowercase__ = ["image_processor", "char_tokenizer"] lowercase__ = "ViTImageProcessor" lowercase__ = "MgpstrTokenizer" def __init__( self: Tuple, a_: Any=None, a_: Tuple=None, **a_: Dict ): '''simple docstring''' _snake_case : List[str] = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""", a_, ) _snake_case : List[str] = kwargs.pop("""feature_extractor""" ) _snake_case : int = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) _snake_case : List[str] = tokenizer _snake_case : Tuple = AutoTokenizer.from_pretrained("""gpt2""" ) _snake_case : List[Any] = AutoTokenizer.from_pretrained("""bert-base-uncased""" ) super().__init__(a_, a_ ) def __call__( self: Any, a_: Any=None, a_: List[Any]=None, a_: Any=None, **a_: Tuple ): '''simple docstring''' if images is None and text is None: raise ValueError("""You need to specify either an `images` or `text` input to process.""" ) if images is not None: _snake_case : int = self.image_processor(a_, return_tensors=a_, **a_ ) if text is not None: _snake_case : Union[str, Any] = self.char_tokenizer(a_, return_tensors=a_, **a_ ) if text is None: return inputs elif images is None: return encodings else: _snake_case : Optional[int] = encodings["""input_ids"""] return inputs def UpperCamelCase_ ( self: str, a_: Union[str, Any] ): '''simple docstring''' _snake_case , _snake_case , _snake_case : Union[str, Any] = sequences _snake_case : List[Any] = char_preds.size(0 ) _snake_case , _snake_case : Optional[int] = self._decode_helper(a_, """char""" ) _snake_case , _snake_case : Union[str, Any] = self._decode_helper(a_, """bpe""" ) _snake_case , _snake_case : List[str] = self._decode_helper(a_, """wp""" ) _snake_case : List[str] = [] _snake_case : List[Any] = [] for i in range(a_ ): _snake_case : Any = [char_scores[i], bpe_scores[i], wp_scores[i]] _snake_case : str = [char_strs[i], bpe_strs[i], wp_strs[i]] _snake_case : Union[str, Any] = scores.index(max(a_ ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) _snake_case : Tuple = {} _snake_case : List[str] = final_strs _snake_case : Optional[int] = final_scores _snake_case : Optional[int] = char_strs _snake_case : Optional[Any] = bpe_strs _snake_case : Dict = wp_strs return out def UpperCamelCase_ ( self: Any, a_: List[Any], a_: Any ): '''simple docstring''' if format == DecodeType.CHARACTER: _snake_case : int = self.char_decode _snake_case : Optional[int] = 1 _snake_case : str = """[s]""" elif format == DecodeType.BPE: _snake_case : Dict = self.bpe_decode _snake_case : Any = 2 _snake_case : str = """#""" elif format == DecodeType.WORDPIECE: _snake_case : Optional[int] = self.wp_decode _snake_case : Optional[int] = 102 _snake_case : List[str] = """[SEP]""" else: raise ValueError(f"Format 
{format} is not supported." ) _snake_case , _snake_case : List[str] = [], [] _snake_case : str = pred_logits.size(0 ) _snake_case : List[str] = pred_logits.size(1 ) _snake_case , _snake_case : List[Any] = pred_logits.topk(1, dim=-1, largest=a_, sorted=a_ ) _snake_case : Optional[int] = preds_index.view(-1, a_ )[:, 1:] _snake_case : Tuple = decoder(a_ ) _snake_case , _snake_case : Any = torch.nn.functional.softmax(a_, dim=2 ).max(dim=2 ) _snake_case : Dict = preds_max_prob[:, 1:] for index in range(a_ ): _snake_case : Optional[int] = preds_str[index].find(a_ ) _snake_case : Dict = preds_str[index][:pred_eos] _snake_case : List[Any] = preds_index[index].cpu().tolist() _snake_case : Tuple = pred_index.index(a_ ) if eos_token in pred_index else -1 _snake_case : str = preds_max_prob[index][: pred_eos_index + 1] _snake_case : Optional[int] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(a_ ) conf_scores.append(a_ ) return dec_strs, conf_scores def UpperCamelCase_ ( self: Union[str, Any], a_: List[Any] ): '''simple docstring''' _snake_case : Union[str, Any] = [seq.replace(""" """, """""" ) for seq in self.char_tokenizer.batch_decode(a_ )] return decode_strs def UpperCamelCase_ ( self: Optional[Any], a_: Dict ): '''simple docstring''' return self.bpe_tokenizer.batch_decode(a_ ) def UpperCamelCase_ ( self: List[Any], a_: Optional[Any] ): '''simple docstring''' _snake_case : str = [seq.replace(""" """, """""" ) for seq in self.wp_tokenizer.batch_decode(a_ )] return decode_strs
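An editor-added toy sketch of the best-of-three selection the processor's batch_decode performs per sample: each decoder head (char / bpe / wp) proposes a string with a confidence, and the highest-confidence proposal wins. Values here are invented for illustration:

# Editor's sketch with toy scores and strings.
scores = [0.91, 0.85, 0.88]            # char, bpe, wp confidences
strs = ["ticket", "tlcket", "ticket"]  # the three heads' decodes
max_score_index = scores.index(max(scores))
assert strs[max_score_index] == "ticket"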
28
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''', '''BridgeTower/bridgetower-base-itm-mlm''': ( '''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json''' ), } class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower_vision_model" def __init__( self: Tuple, a_: str=768, a_: Union[str, Any]=12, a_: List[str]=3, a_: Optional[int]=16, a_: List[Any]=288, a_: Optional[Any]=1, a_: Any=1E-05, a_: Dict=False, a_: Any=True, a_: int=False, **a_: int, ): '''simple docstring''' super().__init__(**a_ ) _snake_case : str = hidden_size _snake_case : int = num_hidden_layers _snake_case : Any = num_channels _snake_case : Union[str, Any] = patch_size _snake_case : Dict = image_size _snake_case : Optional[Any] = initializer_factor _snake_case : Any = layer_norm_eps _snake_case : int = stop_gradient _snake_case : Any = share_layernorm _snake_case : List[Any] = remove_last_layer @classmethod def UpperCamelCase_ ( cls: Union[str, Any], a_: Union[str, os.PathLike], **a_: Optional[Any] ): '''simple docstring''' _snake_case , _snake_case : List[Any] = cls.get_config_dict(a_, **a_ ) if config_dict.get("""model_type""" ) == "bridgetower": _snake_case : str = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." 
) return cls.from_dict(a_, **a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower_text_model" def __init__( self: str, a_: Dict=50_265, a_: List[Any]=768, a_: Union[str, Any]=12, a_: List[str]=12, a_: str=1, a_: Optional[Any]=3_072, a_: int="gelu", a_: int=0.1, a_: int=0.1, a_: Optional[int]=514, a_: Tuple=1, a_: Tuple=1E-05, a_: Optional[int]=1, a_: Union[str, Any]=0, a_: str=2, a_: Any="absolute", a_: List[Any]=True, **a_: Union[str, Any], ): '''simple docstring''' super().__init__(**a_ ) _snake_case : str = vocab_size _snake_case : Optional[int] = hidden_size _snake_case : Dict = num_hidden_layers _snake_case : Optional[int] = num_attention_heads _snake_case : Optional[int] = hidden_act _snake_case : List[Any] = initializer_factor _snake_case : Optional[int] = intermediate_size _snake_case : int = hidden_dropout_prob _snake_case : Tuple = attention_probs_dropout_prob _snake_case : List[str] = max_position_embeddings _snake_case : Optional[int] = type_vocab_size _snake_case : List[Any] = layer_norm_eps _snake_case : Dict = position_embedding_type _snake_case : Dict = use_cache _snake_case : int = pad_token_id _snake_case : Union[str, Any] = bos_token_id _snake_case : Union[str, Any] = eos_token_id @classmethod def UpperCamelCase_ ( cls: str, a_: Union[str, os.PathLike], **a_: int ): '''simple docstring''' _snake_case , _snake_case : Optional[int] = cls.get_config_dict(a_, **a_ ) if config_dict.get("""model_type""" ) == "bridgetower": _snake_case : Union[str, Any] = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(a_, **a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower" def __init__( self: int, a_: List[str]=True, a_: Any="gelu", a_: List[Any]=768, a_: int=1, a_: Optional[int]=1E-05, a_: Tuple=False, a_: Optional[Any]="add", a_: List[str]=12, a_: Union[str, Any]=6, a_: int=False, a_: Any=False, a_: Dict=None, a_: Any=None, **a_: str, ): '''simple docstring''' _snake_case : str = kwargs.pop("""text_config_dict""", a_ ) _snake_case : Optional[Any] = kwargs.pop("""vision_config_dict""", a_ ) super().__init__(**a_ ) _snake_case : str = share_cross_modal_transformer_layers _snake_case : Any = hidden_act _snake_case : Union[str, Any] = hidden_size _snake_case : Union[str, Any] = initializer_factor _snake_case : Dict = layer_norm_eps _snake_case : Dict = share_link_tower_layers _snake_case : Optional[int] = link_tower_type _snake_case : Any = num_attention_heads _snake_case : int = num_hidden_layers _snake_case : int = tie_word_embeddings _snake_case : Optional[Any] = init_layernorm_from_vision_encoder if text_config is None: _snake_case : Optional[Any] = {} logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" ) if vision_config is None: _snake_case : str = {} logger.info("""`vision_config` is `None`. 
Initializing the `BridgeTowerVisionConfig` with default values.""" ) _snake_case : Any = BridgeTowerTextConfig(**a_ ) _snake_case : List[Any] = BridgeTowerVisionConfig(**a_ ) @classmethod def UpperCamelCase_ ( cls: Union[str, Any], a_: BridgeTowerTextConfig, a_: BridgeTowerVisionConfig, **a_: Optional[Any] ): '''simple docstring''' return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[int] = copy.deepcopy(self.__dict__ ) _snake_case : str = self.text_config.to_dict() _snake_case : List[str] = self.vision_config.to_dict() _snake_case : Tuple = self.__class__.model_type return output
28
1
"""simple docstring""" import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class lowercase( __a , __a ): '''simple docstring''' @register_to_config def __init__( self: str, *, a_: int = 4, a_: int = 768, a_: int, a_: str, ): '''simple docstring''' super().__init__() _snake_case : Optional[int] = nn.Parameter(torch.zeros(a_ ) ) # parameters for additional clip time embeddings _snake_case : List[Any] = nn.Linear(a_, a_ ) _snake_case : str = nn.Linear(a_, a_ ) # parameters for encoder hidden states _snake_case : Optional[Any] = clip_extra_context_tokens _snake_case : Any = nn.Linear( a_, self.clip_extra_context_tokens * cross_attention_dim ) _snake_case : Optional[int] = nn.Linear(a_, a_ ) _snake_case : Dict = nn.LayerNorm(a_ ) def UpperCamelCase_ ( self: Any, *, a_: Union[str, Any], a_: List[Any], a_: Optional[Any], a_: Dict ): '''simple docstring''' if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings _snake_case : Union[str, Any] = image_embeddings.shape[0] _snake_case : str = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 ) _snake_case : Union[str, Any] = classifier_free_guidance_embeddings.expand( a_, -1 ) _snake_case : Tuple = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0 ) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] _snake_case : str = prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... _snake_case : Tuple = self.embedding_proj(a_ ) _snake_case : Tuple = self.clip_image_embeddings_project_to_time_embeddings(a_ ) _snake_case : str = time_projected_image_embeddings + time_projected_prompt_embeds # ... and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" _snake_case : int = self.clip_extra_context_tokens_proj(a_ ) _snake_case : Any = clip_extra_context_tokens.reshape(a_, -1, self.clip_extra_context_tokens ) _snake_case : str = clip_extra_context_tokens.permute(0, 2, 1 ) _snake_case : int = self.encoder_hidden_states_proj(a_ ) _snake_case : str = self.text_encoder_hidden_states_norm(a_ ) _snake_case : List[str] = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1 ) return text_encoder_hidden_states, additive_clip_time_embeddings
28
"""simple docstring""" import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def UpperCAmelCase__ (): """simple docstring""" _snake_case : Optional[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png""" _snake_case : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("""RGB""" ) return image def UpperCAmelCase__ (snake_case__ : Any ): """simple docstring""" _snake_case : str = [] # fmt: off # vision encoder rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") ) rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") ) rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") ) rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) ) rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") ) # QFormer rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") ) rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") ) # fmt: on return rename_keys def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Tuple ): """simple docstring""" _snake_case : Optional[Any] = dct.pop(snake_case__ ) _snake_case : Optional[int] = val def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : str ): """simple docstring""" for i in range(config.vision_config.num_hidden_layers ): 
# read in original q and v biases _snake_case : Optional[int] = state_dict.pop(F"visual_encoder.blocks.{i}.attn.q_bias" ) _snake_case : Tuple = state_dict.pop(F"visual_encoder.blocks.{i}.attn.v_bias" ) # next, set bias in the state dict _snake_case : List[str] = torch.cat((q_bias, torch.zeros_like(snake_case__ , requires_grad=snake_case__ ), v_bias) ) _snake_case : Dict = qkv_bias def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Union[str, Any] ): """simple docstring""" _snake_case : List[Any] = 3_64 if """coco""" in model_name else 2_24 _snake_case : List[str] = BlipaVisionConfig(image_size=snake_case__ ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: _snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=snake_case__ ).to_dict() elif "opt-6.7b" in model_name: _snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=snake_case__ ).to_dict() elif "t5-xl" in model_name: _snake_case : Tuple = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: _snake_case : List[Any] = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() _snake_case : int = BlipaConfig(vision_config=snake_case__ , text_config=snake_case__ ) return config, image_size @torch.no_grad() def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int=None , snake_case__ : str=False ): """simple docstring""" _snake_case : List[str] = ( AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" ) if """opt""" in model_name else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" ) ) _snake_case : str = tokenizer("""\n""" , add_special_tokens=snake_case__ ).input_ids[0] _snake_case , _snake_case : Dict = get_blipa_config(snake_case__ , eos_token_id=snake_case__ ) _snake_case : str = BlipaForConditionalGeneration(snake_case__ ).eval() _snake_case : int = { """blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""), """blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""), """blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""), """blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""), """blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""), """blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""), """blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""), } _snake_case , _snake_case : List[Any] = model_name_to_original[model_name] # load original model print("""Loading original model...""" ) _snake_case : int = """cuda""" if torch.cuda.is_available() else """cpu""" _snake_case , _snake_case , _snake_case : Any = load_model_and_preprocess( name=snake_case__ , model_type=snake_case__ , is_eval=snake_case__ , device=snake_case__ ) original_model.eval() print("""Done!""" ) # update state dict keys _snake_case : Any = original_model.state_dict() _snake_case : Dict = create_rename_keys(snake_case__ ) for src, dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): _snake_case : str = state_dict.pop(snake_case__ ) if key.startswith("""Qformer.bert""" ): _snake_case : str = key.replace("""Qformer.bert""" , """qformer""" ) if "attention.self" in key: _snake_case : Any = key.replace("""self""" , 
"""attention""" ) if "opt_proj" in key: _snake_case : List[str] = key.replace("""opt_proj""" , """language_projection""" ) if "t5_proj" in key: _snake_case : Optional[Any] = key.replace("""t5_proj""" , """language_projection""" ) if key.startswith("""opt""" ): _snake_case : List[Any] = key.replace("""opt""" , """language""" ) if key.startswith("""t5""" ): _snake_case : List[Any] = key.replace("""t5""" , """language""" ) _snake_case : str = val # read in qv biases read_in_q_v_bias(snake_case__ , snake_case__ ) _snake_case , _snake_case : List[str] = hf_model.load_state_dict(snake_case__ , strict=snake_case__ ) assert len(snake_case__ ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] _snake_case : Any = load_demo_image() _snake_case : str = vis_processors["""eval"""](snake_case__ ).unsqueeze(0 ).to(snake_case__ ) _snake_case : List[Any] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(snake_case__ ) # create processor _snake_case : Any = BlipImageProcessor( size={"""height""": image_size, """width""": image_size} , image_mean=snake_case__ , image_std=snake_case__ ) _snake_case : int = BlipaProcessor(image_processor=snake_case__ , tokenizer=snake_case__ ) _snake_case : Any = processor(images=snake_case__ , return_tensors="""pt""" ).pixel_values.to(snake_case__ ) # make sure processor creates exact same pixel values assert torch.allclose(snake_case__ , snake_case__ ) original_model.to(snake_case__ ) hf_model.to(snake_case__ ) with torch.no_grad(): if "opt" in model_name: _snake_case : str = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits _snake_case : int = hf_model(snake_case__ , snake_case__ ).logits else: _snake_case : str = original_model( {"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits _snake_case : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 ) _snake_case : Union[str, Any] = hf_model(snake_case__ , snake_case__ , labels=snake_case__ ).logits assert original_logits.shape == logits.shape print("""First values of original logits:""" , original_logits[0, :3, :3] ) print("""First values of HF logits:""" , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": _snake_case : List[str] = torch.tensor( [[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=snake_case__ ) assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 ) elif model_name == "blip2-flan-t5-xl-coco": _snake_case : Union[str, Any] = torch.tensor( [[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=snake_case__ ) else: # cast to same type _snake_case : int = logits.dtype assert torch.allclose(original_logits.to(snake_case__ ) , snake_case__ , atol=1e-2 ) print("""Looks ok!""" ) print("""Generating a caption...""" ) _snake_case : Any = """""" _snake_case : str = tokenizer(snake_case__ , return_tensors="""pt""" ).input_ids.to(snake_case__ ) _snake_case : Union[str, Any] = original_model.generate({"""image""": original_pixel_values} ) _snake_case : Tuple = hf_model.generate( snake_case__ , snake_case__ , do_sample=snake_case__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print("""Original generation:""" , snake_case__ ) _snake_case : Optional[Any] = input_ids.shape[1] _snake_case : int = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=snake_case__ ) _snake_case : Optional[Any] = 
[text.strip() for text in output_text] print("""HF generation:""" , snake_case__ ) if pytorch_dump_folder_path is not None: processor.save_pretrained(snake_case__ ) hf_model.save_pretrained(snake_case__ ) if push_to_hub: processor.push_to_hub(F"nielsr/{model_name}" ) hf_model.push_to_hub(F"nielsr/{model_name}" ) if __name__ == "__main__": A_ = argparse.ArgumentParser() A_ = [ '''blip2-opt-2.7b''', '''blip2-opt-6.7b''', '''blip2-opt-2.7b-coco''', '''blip2-opt-6.7b-coco''', '''blip2-flan-t5-xl''', '''blip2-flan-t5-xl-coco''', '''blip2-flan-t5-xxl''', ] parser.add_argument( '''--model_name''', default='''blip2-opt-2.7b''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) A_ = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
28
1
"""simple docstring""" from typing import Any def UpperCAmelCase__ (snake_case__ : list ): """simple docstring""" if not input_list: return [] _snake_case : List[Any] = [input_list.count(snake_case__ ) for value in input_list] _snake_case : Optional[int] = max(snake_case__ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(snake_case__ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
28
"""simple docstring""" import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() A_ = logging.get_logger(__name__) def UpperCAmelCase__ (snake_case__ : Optional[int] ): """simple docstring""" print("""Loading config file...""" ) def flatten_yaml_as_dict(snake_case__ : List[Any] , snake_case__ : Optional[Any]="" , snake_case__ : Tuple="." ): _snake_case : Union[str, Any] = [] for k, v in d.items(): _snake_case : List[str] = parent_key + sep + k if parent_key else k if isinstance(snake_case__ , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(snake_case__ , snake_case__ , sep=snake_case__ ).items() ) else: items.append((new_key, v) ) return dict(snake_case__ ) _snake_case : Dict = argparse.Namespace() with open(snake_case__ , """r""" ) as yaml_file: try: _snake_case : List[Any] = yaml.load(snake_case__ , Loader=yaml.FullLoader ) _snake_case : Any = flatten_yaml_as_dict(snake_case__ ) for k, v in flat_cfg.items(): setattr(snake_case__ , snake_case__ , snake_case__ ) except yaml.YAMLError as exc: logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case__ , str(snake_case__ ) ) ) return config def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ): """simple docstring""" _snake_case : Dict = MobileViTVaConfig() _snake_case : Optional[int] = False # dataset if task_name.startswith("""imagenet1k_""" ): _snake_case : Dict = 10_00 if int(task_name.strip().split("""_""" )[-1] ) == 3_84: _snake_case : Union[str, Any] = 3_84 else: _snake_case : Optional[Any] = 2_56 _snake_case : str = """imagenet-1k-id2label.json""" elif task_name.startswith("""imagenet21k_to_1k_""" ): _snake_case : str = 2_10_00 if int(task_name.strip().split("""_""" )[-1] ) == 3_84: _snake_case : Dict = 3_84 else: _snake_case : Union[str, Any] = 2_56 _snake_case : Tuple = """imagenet-22k-id2label.json""" elif task_name.startswith("""ade20k_""" ): _snake_case : Tuple = 1_51 _snake_case : str = 5_12 _snake_case : List[Any] = """ade20k-id2label.json""" _snake_case : Union[str, Any] = True elif task_name.startswith("""voc_""" ): _snake_case : List[Any] = 21 _snake_case : List[str] = 5_12 _snake_case : int = """pascal-voc-id2label.json""" _snake_case : int = True # orig_config _snake_case : int = load_orig_config_file(snake_case__ ) assert getattr(snake_case__ , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model" _snake_case : str = getattr(snake_case__ , """model.classification.mitv2.width_multiplier""" , 1.0 ) assert ( getattr(snake_case__ , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" _snake_case : int = getattr(snake_case__ , """model.classification.activation.name""" , """swish""" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: _snake_case : Tuple = getattr(snake_case__ , """model.segmentation.output_stride""" , 16 ) if "_deeplabv3" in task_name: _snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] ) _snake_case : Tuple = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 ) _snake_case : Any = 
getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 ) # id2label _snake_case : Union[str, Any] = """huggingface/label-files""" _snake_case : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) ) _snake_case : List[Any] = {int(snake_case__ ): v for k, v in idalabel.items()} _snake_case : Tuple = idalabel _snake_case : Any = {v: k for k, v in idalabel.items()} return config def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[Any] ): """simple docstring""" _snake_case : List[str] = dct.pop(snake_case__ ) _snake_case : List[Any] = val def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : int=False ): """simple docstring""" if base_model: _snake_case : Any = """""" else: _snake_case : Union[str, Any] = """mobilevitv2.""" _snake_case : Dict = [] for k in state_dict.keys(): if k[:8] == "encoder.": _snake_case : List[str] = k[8:] else: _snake_case : str = k if ".block." in k: _snake_case : Optional[int] = k_new.replace(""".block.""" , """.""" ) if ".conv." in k: _snake_case : Union[str, Any] = k_new.replace(""".conv.""" , """.convolution.""" ) if ".norm." in k: _snake_case : str = k_new.replace(""".norm.""" , """.normalization.""" ) if "conv_1." in k: _snake_case : int = k_new.replace("""conv_1.""" , F"{model_prefix}conv_stem." ) for i in [1, 2]: if F"layer_{i}." in k: _snake_case : Tuple = k_new.replace(F"layer_{i}." , F"{model_prefix}encoder.layer.{i-1}.layer." ) if ".exp_1x1." in k: _snake_case : Optional[Any] = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" ) if ".red_1x1." in k: _snake_case : Optional[Any] = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" ) for i in [3, 4, 5]: if F"layer_{i}.0." in k: _snake_case : Tuple = k_new.replace(F"layer_{i}.0." , F"{model_prefix}encoder.layer.{i-1}.downsampling_layer." ) if F"layer_{i}.1.local_rep.0." in k: _snake_case : Any = k_new.replace(F"layer_{i}.1.local_rep.0." , F"{model_prefix}encoder.layer.{i-1}.conv_kxk." ) if F"layer_{i}.1.local_rep.1." in k: _snake_case : str = k_new.replace(F"layer_{i}.1.local_rep.1." , F"{model_prefix}encoder.layer.{i-1}.conv_1x1." ) for i in [3, 4, 5]: if i == 3: _snake_case : Optional[Any] = [0, 1] elif i == 4: _snake_case : Any = [0, 1, 2, 3] elif i == 5: _snake_case : List[Any] = [0, 1, 2] for j in j_in: if F"layer_{i}.1.global_rep.{j}." in k: _snake_case : Any = k_new.replace( F"layer_{i}.1.global_rep.{j}." , F"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." ) if F"layer_{i}.1.global_rep.{j+1}." in k: _snake_case : List[Any] = k_new.replace( F"layer_{i}.1.global_rep.{j+1}." , F"{model_prefix}encoder.layer.{i-1}.layernorm." ) if F"layer_{i}.1.conv_proj." in k: _snake_case : Union[str, Any] = k_new.replace(F"layer_{i}.1.conv_proj." , F"{model_prefix}encoder.layer.{i-1}.conv_projection." ) if "pre_norm_attn.0." in k: _snake_case : List[Any] = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" ) if "pre_norm_attn.1." in k: _snake_case : Optional[int] = k_new.replace("""pre_norm_attn.1.""" , """attention.""" ) if "pre_norm_ffn.0." in k: _snake_case : List[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" ) if "pre_norm_ffn.1." in k: _snake_case : Tuple = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" ) if "pre_norm_ffn.3." in k: _snake_case : Any = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" ) if "classifier.1." in k: _snake_case : List[str] = k_new.replace("""classifier.1.""" , """classifier.""" ) if "seg_head." 
in k: _snake_case : str = k_new.replace("""seg_head.""" , """segmentation_head.""" ) if ".aspp_layer." in k: _snake_case : Optional[int] = k_new.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in k: _snake_case : int = k_new.replace(""".aspp_pool.""" , """.""" ) rename_keys.append((k, k_new) ) return rename_keys def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" _snake_case : List[str] = [] for k in state_dict.keys(): if k.startswith("""seg_head.aux_head.""" ): keys_to_ignore.append(snake_case__ ) for k in keys_to_ignore: state_dict.pop(snake_case__ , snake_case__ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg""" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" _snake_case : Any = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) return im @torch.no_grad() def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Tuple ): """simple docstring""" _snake_case : int = get_mobilevitva_config(snake_case__ , snake_case__ ) # load original state_dict _snake_case : Optional[int] = torch.load(snake_case__ , map_location="""cpu""" ) # load huggingface model if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ): _snake_case : Any = MobileViTVaForSemanticSegmentation(snake_case__ ).eval() _snake_case : List[Any] = False else: _snake_case : List[Any] = MobileViTVaForImageClassification(snake_case__ ).eval() _snake_case : Optional[Any] = False # remove and rename some keys of load the original model _snake_case : Union[str, Any] = checkpoint remove_unused_keys(snake_case__ ) _snake_case : List[str] = create_rename_keys(snake_case__ , base_model=snake_case__ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) # load modified state_dict model.load_state_dict(snake_case__ ) # Check outputs on an image, prepared by MobileViTImageProcessor _snake_case : Optional[int] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) _snake_case : List[str] = image_processor(images=prepare_img() , return_tensors="""pt""" ) _snake_case : Optional[Any] = model(**snake_case__ ) # verify classification model if task_name.startswith("""imagenet""" ): _snake_case : List[str] = outputs.logits _snake_case : Any = logits.argmax(-1 ).item() print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] ) if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0: # expected_logits for base variant _snake_case : List[str] = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] ) assert torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) print(F"Saving model {task_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(snake_case__ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(snake_case__ ) if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''imagenet1k_256''', type=str, help=( '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . 
''' ''' Classification (ImageNet-1k) - MobileViTV2 (256x256) : imagenet1k_256 - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384 - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : imagenet21k_to_1k_256 - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on ImageNet-1k 384x384) : imagenet21k_to_1k_384 Segmentation - ADE20K Dataset : ade20k_deeplabv3 - Pascal VOC 2012 Dataset: voc_deeplabv3 ''' ), choices=[ '''imagenet1k_256''', '''imagenet1k_384''', '''imagenet21k_to_1k_256''', '''imagenet21k_to_1k_384''', '''ade20k_deeplabv3''', '''voc_deeplabv3''', ], ) parser.add_argument( '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) A_ = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
"""simple docstring""" from __future__ import annotations def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" _snake_case : Optional[Any] = 2 _snake_case : List[str] = [] while i * i <= n: if n % i: i += 1 else: n //= i factors.append(snake_case__ ) if n > 1: factors.append(snake_case__ ) return factors if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import os import sys import unittest A_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path A_ = os.path.join(git_repo_path, '''src''', '''diffusers''') class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Optional[int] = find_backend(""" if not is_torch_available():""" ) self.assertEqual(a_, """torch""" ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") _snake_case : Any = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" ) self.assertEqual(a_, """torch_and_transformers""" ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") _snake_case : Union[str, Any] = find_backend( """ if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" ) self.assertEqual(a_, """torch_and_transformers_and_onnx""" ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Dict = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("""torch""", a_ ) self.assertIn("""torch_and_transformers""", a_ ) self.assertIn("""flax_and_transformers""", a_ ) self.assertIn("""torch_and_transformers_and_onnx""", a_ ) # Likewise, we can't assert on the exact content of a key self.assertIn("""UNet2DModel""", objects["""torch"""] ) self.assertIn("""FlaxUNet2DConditionModel""", objects["""flax"""] ) self.assertIn("""StableDiffusionPipeline""", objects["""torch_and_transformers"""] ) self.assertIn("""FlaxStableDiffusionPipeline""", objects["""flax_and_transformers"""] ) self.assertIn("""LMSDiscreteScheduler""", objects["""torch_and_scipy"""] ) self.assertIn("""OnnxStableDiffusionPipeline""", objects["""torch_and_transformers_and_onnx"""] ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[Any] = create_dummy_object("""CONSTANT""", """'torch'""" ) self.assertEqual(a_, """\nCONSTANT = None\n""" ) _snake_case : Optional[int] = create_dummy_object("""function""", """'torch'""" ) self.assertEqual( a_, """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" ) _snake_case : List[Any] = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, 'torch') @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, 'torch') """ _snake_case : Union[str, Any] = create_dummy_object("""FakeClass""", """'torch'""" ) self.assertEqual(a_, a_ ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Union[str, Any] = """# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, [\"torch\"]) class FakeClass(metaclass=DummyObject): _backends = [\"torch\"] def __init__(self, *args, **kwargs): requires_backends(self, [\"torch\"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) """ _snake_case : List[Any] = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} ) self.assertEqual(dummy_files["""torch"""], a_ )
"""simple docstring""" from __future__ import annotations def UpperCAmelCase__ (snake_case__ : list[int] ): # This function is recursive """simple docstring""" _snake_case : Optional[int] = len(snake_case__ ) # If the array contains only one element, we return it (it's the stop condition of # recursion) if array_length <= 1: return array # Else _snake_case : Dict = array[0] _snake_case : List[Any] = False _snake_case : Any = 1 _snake_case : list[int] = [] while not is_found and i < array_length: if array[i] < pivot: _snake_case : int = True _snake_case : str = [element for element in array[i:] if element >= array[i]] _snake_case : Optional[int] = longest_subsequence(snake_case__ ) if len(snake_case__ ) > len(snake_case__ ): _snake_case : str = temp_array else: i += 1 _snake_case : Optional[int] = [element for element in array[1:] if element >= pivot] _snake_case : Union[str, Any] = [pivot, *longest_subsequence(snake_case__ )] if len(snake_case__ ) > len(snake_case__ ): return temp_array else: return longest_subseq if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) A_ = { '''configuration_owlvit''': [ '''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OwlViTConfig''', '''OwlViTOnnxConfig''', '''OwlViTTextConfig''', '''OwlViTVisionConfig''', ], '''processing_owlvit''': ['''OwlViTProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = ['''OwlViTFeatureExtractor'''] A_ = ['''OwlViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OwlViTModel''', '''OwlViTPreTrainedModel''', '''OwlViTTextModel''', '''OwlViTVisionModel''', '''OwlViTForObjectDetection''', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging A_ = logging.get_logger(__name__) def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : List[str] ): """simple docstring""" try: with open(snake_case__ , """rb""" ) as flax_state_f: _snake_case : int = from_bytes(snake_case__ , flax_state_f.read() ) except UnpicklingError as e: try: with open(snake_case__ ) as f: if f.read().startswith("""version""" ): raise OSError( """You seem to have cloned a repository without having git-lfs installed. Please""" """ install git-lfs and run `git lfs install` followed by `git lfs pull` in the""" """ folder you cloned.""" ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(F"Unable to convert {model_file} to Flax deserializable object. " ) return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ ) def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : List[Any] ): """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( """Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see""" """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation""" """ instructions.""" ) raise # check if we have bf16 weights _snake_case : List[Any] = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values() if any(snake_case__ ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( """Found ``bfloat16`` weights in Flax model. 
Casting all ``bfloat16`` weights to ``float32`` """ """before loading those in PyTorch model.""" ) _snake_case : Tuple = jax.tree_util.tree_map( lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ ) _snake_case : Optional[int] = """""" _snake_case : Dict = flatten_dict(snake_case__ , sep=""".""" ) _snake_case : int = pt_model.state_dict() # keep track of unexpected & missing keys _snake_case : Optional[Any] = [] _snake_case : str = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): _snake_case : Tuple = flax_key_tuple.split(""".""" ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: _snake_case : str = flax_key_tuple_array[:-1] + ["""weight"""] _snake_case : str = jnp.transpose(snake_case__ , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": _snake_case : List[str] = flax_key_tuple_array[:-1] + ["""weight"""] _snake_case : str = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": _snake_case : Union[str, Any] = flax_key_tuple_array[:-1] + ["""weight"""] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(snake_case__ ): _snake_case : str = ( flax_key_tuple_string.replace("""_0""" , """.0""" ) .replace("""_1""" , """.1""" ) .replace("""_2""" , """.2""" ) .replace("""_3""" , """.3""" ) .replace("""_4""" , """.4""" ) .replace("""_5""" , """.5""" ) .replace("""_6""" , """.6""" ) .replace("""_7""" , """.7""" ) .replace("""_8""" , """.8""" ) .replace("""_9""" , """.9""" ) ) _snake_case : Dict = """.""".join(snake_case__ ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected " F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." ) else: # add weight to pytorch dict _snake_case : Optional[Any] = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor _snake_case : Optional[Any] = torch.from_numpy(snake_case__ ) # remove from missing keys missing_keys.remove(snake_case__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(snake_case__ ) pt_model.load_state_dict(snake_case__ ) # re-transform missing_keys to list _snake_case : Tuple = list(snake_case__ ) if len(snake_case__ ) > 0: logger.warning( """Some weights of the Flax model were not used when initializing the PyTorch model""" F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing" F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture" """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This""" F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect" """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a""" """ FlaxBertForSequenceClassification model).""" ) if len(snake_case__ ) > 0: logger.warning( F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly" F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to" """ use it for predictions and inference.""" ) return pt_model
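# Minimal sketch of the 4D kernel transpose performed above (hypothetical
# example, not from the original source): Flax stores conv kernels as
# (H, W, in_ch, out_ch), while PyTorch expects (out_ch, in_ch, H, W) --
# exactly what transpose(3, 2, 0, 1) produces.
import numpy as np

flax_kernel = np.zeros((3, 3, 16, 32))  # (H, W, in_ch, out_ch)
pt_kernel = np.transpose(flax_kernel, (3, 2, 0, 1))  # (out_ch, in_ch, H, W)
assert pt_kernel.shape == (32, 16, 3, 3)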
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml A_ = logging.get_logger(__name__) def UpperCAmelCase__ (snake_case__ : bool , snake_case__ : bool ): """simple docstring""" def run_func(snake_case__ : Tuple ): @wraps(snake_case__ ) def run_in_eager_mode(*snake_case__ : str , **snake_case__ : Any ): return func(*snake_case__ , **snake_case__ ) @wraps(snake_case__ ) @tf.function(experimental_compile=snake_case__ ) def run_in_graph_mode(*snake_case__ : Any , **snake_case__ : Optional[int] ): return func(*snake_case__ , **snake_case__ ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int , snake_case__ : int ): """simple docstring""" _snake_case : List[str] = random.Random() _snake_case : Optional[int] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(snake_case__ , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class lowercase( __a ): '''simple docstring''' lowercase__ = 42 lowercase__ = 42 lowercase__ = "TensorFlow" @property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' return tf.__version__ def UpperCamelCase_ ( self: List[str], a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : List[str] = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : Optional[int] = self._prepare_inference_func(a_, a_, a_ ) return self._measure_speed(_inference ) def UpperCamelCase_ ( self: int, a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : Tuple = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : Optional[Any] = self._prepare_train_func(a_, a_, a_ ) return self._measure_speed(_train ) def UpperCamelCase_ ( self: Dict, a_: str, a_: int, a_: int ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ ) _snake_case : str = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : List[str] = self._prepare_inference_func(a_, a_, a_ ) return self._measure_memory(_inference ) def UpperCamelCase_ ( self: Tuple, a_: str, a_: int, a_: int ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ ) _snake_case : Dict = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : Optional[int] = self._prepare_train_func(a_, 
a_, a_ ) return self._measure_memory(_train ) def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : List[Any] = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _snake_case : List[Any] = ( hasattr(a_, """architectures""" ) and isinstance(config.architectures, a_ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _snake_case : str = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _snake_case : List[Any] = __import__("""transformers""", fromlist=[model_class] ) _snake_case : Dict = getattr(a_, a_ ) _snake_case : Any = model_cls(a_ ) except ImportError: raise ImportError( f"{model_class} does not exist. If you just want to test the pretrained model, you might want to" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _snake_case : Any = TF_MODEL_MAPPING[config.__class__](a_ ) # encoder-decoder has vocab size saved differently _snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size _snake_case : List[str] = random_input_ids(a_, a_, a_ ) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_decoder_forward(): return model(a_, decoder_input_ids=a_, training=a_ ) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_forward(): return model(a_, training=a_ ) _snake_case : Optional[int] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def UpperCamelCase_ ( self: Optional[int], a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : str = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _snake_case : Tuple = ( hasattr(a_, """architectures""" ) and isinstance(config.architectures, a_ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _snake_case : List[str] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _snake_case : str = __import__("""transformers""", fromlist=[model_class] ) _snake_case : Tuple = getattr(a_, a_ ) _snake_case : Any = model_cls(a_ ) except ImportError: raise ImportError( f"{model_class} does not exist. 
If you just want to test the pretrained model, you might want to" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _snake_case : Optional[Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](a_ ) # encoder-decoder has vocab size saved differently _snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size _snake_case : int = random_input_ids(a_, a_, a_ ) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_decoder_train(): _snake_case : Dict = model(a_, decoder_input_ids=a_, labels=a_, training=a_ )[0] _snake_case : str = tf.gradients(a_, model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_train(): _snake_case : Optional[Any] = model(a_, labels=a_, training=a_ )[0] _snake_case : Optional[Any] = tf.gradients(a_, model.trainable_variables ) return gradients _snake_case : int = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def UpperCamelCase_ ( self: Union[str, Any], a_: str ): '''simple docstring''' with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(a_, repeat=1, number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _snake_case : Dict = timeit.repeat( a_, repeat=self.args.repeat, number=10, ) return min(a_ ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f"Doesn't fit on GPU. {e}" ) def UpperCamelCase_ ( self: Optional[Any], a_: Callable[[], None] ): '''simple docstring''' logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) _snake_case : List[Any] = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) _snake_case : Optional[Any] = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() _snake_case : List[str] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) _snake_case : Tuple = nvml.nvmlDeviceGetMemoryInfo(a_ ) _snake_case : List[str] = meminfo.used _snake_case : Any = Memory(a_ ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) _snake_case : List[Any] = None else: _snake_case : int = measure_peak_memory_cpu(a_ ) _snake_case : List[str] = Memory(a_ ) if isinstance(a_, a_ ) else memory_bytes if self.args.trace_memory_line_by_line: _snake_case : Tuple = stop_memory_tracing(a_ ) if memory is None: _snake_case : int = summary.total else: _snake_case : int = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f"Doesn't fit on GPU. {e}" ) return "N/A", None
"""simple docstring""" import warnings from ...utils import logging from .image_processing_deit import DeiTImageProcessor A_ = logging.get_logger(__name__) class lowercase( __a ): '''simple docstring''' def __init__( self: int, *a_: Dict, **a_: Optional[int] ): '''simple docstring''' warnings.warn( """The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use DeiTImageProcessor instead.""", a_, ) super().__init__(*a_, **a_ )
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : str ): """simple docstring""" _snake_case : str = int(snake_case__ ) # Initialize Result _snake_case : str = [] # Traverse through all denomination for denomination in reversed(snake_case__ ): # Find denominations while int(snake_case__ ) >= int(snake_case__ ): total_value -= int(snake_case__ ) answer.append(snake_case__ ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": A_ = [] A_ = '''0''' if ( input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower() == "y" ): A_ = int(input('''Enter the number of denominations you want to add: ''').strip()) for i in range(0, n): denominations.append(int(input(F'''Denomination {i}: ''').strip())) A_ = input('''Enter the change you want to make in Indian Currency: ''').strip() else: # All denominations of Indian Currency if user does not enter A_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00] A_ = input('''Enter the change you want to make: ''').strip() if int(value) == 0 or int(value) < 0: print('''The total value cannot be zero or negative.''') else: print(F'''Following is minimal change for {value}: ''') A_ = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=''' ''')
"""simple docstring""" import copy import random from transformers import CLIPTokenizer class lowercase( __a ): '''simple docstring''' def __init__( self: Optional[int], *a_: Optional[Any], **a_: Dict ): '''simple docstring''' super().__init__(*a_, **a_ ) _snake_case : int = {} def UpperCamelCase_ ( self: Union[str, Any], a_: Dict, *a_: Any, **a_: List[Any] ): '''simple docstring''' _snake_case : Dict = super().add_tokens(a_, *a_, **a_ ) if num_added_tokens == 0: raise ValueError( f"The tokenizer already contains the token {placeholder_token}. Please pass a different" """ `placeholder_token` that is not already in the tokenizer.""" ) def UpperCamelCase_ ( self: Optional[int], a_: Tuple, *a_: Dict, a_: int=1, **a_: Any ): '''simple docstring''' _snake_case : str = [] if num_vec_per_token == 1: self.try_adding_tokens(a_, *a_, **a_ ) output.append(a_ ) else: _snake_case : int = [] for i in range(a_ ): _snake_case : Union[str, Any] = placeholder_token + f"_{i}" self.try_adding_tokens(a_, *a_, **a_ ) output.append(a_ ) # handle cases where there is a new placeholder token that contains the current placeholder token but is larger for token in self.token_map: if token in placeholder_token: raise ValueError( f"The tokenizer already has placeholder token {token} that can get confused with" f" {placeholder_token}keep placeholder tokens independent" ) _snake_case : Tuple = output def UpperCamelCase_ ( self: Tuple, a_: Optional[Any], a_: Dict=False, a_: Dict=1.0 ): '''simple docstring''' if isinstance(a_, a_ ): _snake_case : Tuple = [] for i in range(len(a_ ) ): output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=a_ ) ) return output for placeholder_token in self.token_map: if placeholder_token in text: _snake_case : str = self.token_map[placeholder_token] _snake_case : Optional[int] = tokens[: 1 + int(len(a_ ) * prop_tokens_to_load )] if vector_shuffle: _snake_case : Any = copy.copy(a_ ) random.shuffle(a_ ) _snake_case : str = text.replace(a_, """ """.join(a_ ) ) return text def __call__( self: Union[str, Any], a_: str, *a_: Union[str, Any], a_: int=False, a_: Dict=1.0, **a_: int ): '''simple docstring''' return super().__call__( self.replace_placeholder_tokens_in_text( a_, vector_shuffle=a_, prop_tokens_to_load=a_ ), *a_, **a_, ) def UpperCamelCase_ ( self: Optional[int], a_: Optional[Any], *a_: Dict, a_: Optional[Any]=False, a_: Optional[Any]=1.0, **a_: Union[str, Any] ): '''simple docstring''' return super().encode( self.replace_placeholder_tokens_in_text( a_, vector_shuffle=a_, prop_tokens_to_load=a_ ), *a_, **a_, )
"""simple docstring""" import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class lowercase: '''simple docstring''' def __init__( self: Optional[Any], a_: Union[str, Any], a_: int=100, a_: int=13, a_: List[Any]=30, a_: str=2, a_: Optional[Any]=3, a_: Optional[int]=True, a_: Any=True, a_: Optional[Any]=32, a_: Tuple=4, a_: str=4, a_: List[Any]=37, a_: List[str]="gelu", a_: str=0.1, a_: Optional[int]=0.1, a_: Any=10, a_: List[str]=0.02, a_: Dict=3, a_: str=None, a_: Optional[int]=[0, 1, 2, 3], ): '''simple docstring''' _snake_case : Optional[int] = parent _snake_case : Optional[Any] = 100 _snake_case : Any = batch_size _snake_case : List[Any] = image_size _snake_case : Optional[Any] = patch_size _snake_case : str = num_channels _snake_case : Tuple = is_training _snake_case : Tuple = use_labels _snake_case : Any = hidden_size _snake_case : Optional[int] = num_hidden_layers _snake_case : List[str] = num_attention_heads _snake_case : Union[str, Any] = intermediate_size _snake_case : Dict = hidden_act _snake_case : str = hidden_dropout_prob _snake_case : Optional[int] = attention_probs_dropout_prob _snake_case : Optional[Any] = type_sequence_label_size _snake_case : Any = initializer_range _snake_case : List[str] = scope _snake_case : int = out_indices _snake_case : Optional[Any] = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _snake_case : Dict = (image_size // patch_size) ** 2 _snake_case : str = num_patches + 1 def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : List[Any] = None _snake_case : Tuple = None if self.use_labels: _snake_case : str = ids_tensor([self.batch_size], self.type_sequence_label_size ) _snake_case : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) _snake_case : List[str] = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return BeitConfig( vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=a_, initializer_range=self.initializer_range, out_indices=self.out_indices, ) def UpperCamelCase_ ( self: 
List[Any], a_: List[Any], a_: Any, a_: Optional[Any], a_: List[str] ): '''simple docstring''' _snake_case : str = BeitModel(config=a_ ) model.to(a_ ) model.eval() _snake_case : Dict = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self: str, a_: List[Any], a_: Optional[Any], a_: Optional[int], a_: List[Any] ): '''simple docstring''' _snake_case : List[str] = BeitForMaskedImageModeling(config=a_ ) model.to(a_ ) model.eval() _snake_case : Union[str, Any] = model(a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) ) def UpperCamelCase_ ( self: Any, a_: List[str], a_: Any, a_: List[Any], a_: Optional[Any] ): '''simple docstring''' _snake_case : Any = self.type_sequence_label_size _snake_case : Any = BeitForImageClassification(a_ ) model.to(a_ ) model.eval() _snake_case : List[Any] = model(a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _snake_case : Any = 1 _snake_case : str = BeitForImageClassification(a_ ) model.to(a_ ) model.eval() _snake_case : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _snake_case : Optional[Any] = model(a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase_ ( self: List[Any], a_: Optional[int], a_: List[Any], a_: str, a_: int ): '''simple docstring''' _snake_case : List[str] = self.num_labels _snake_case : List[Any] = BeitForSemanticSegmentation(a_ ) model.to(a_ ) model.eval() _snake_case : List[str] = model(a_ ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) _snake_case : str = model(a_, labels=a_ ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Tuple = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case , _snake_case : Any = config_and_inputs _snake_case : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) lowercase__ = ( { "feature-extraction": BeitModel, "image-classification": BeitForImageClassification, "image-segmentation": BeitForSemanticSegmentation, } if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Dict = BeitModelTester(self ) _snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 ) def UpperCamelCase_ ( self: str ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""BEiT does not use inputs_embeds""" ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def UpperCamelCase_ ( self: str ): '''simple docstring''' pass def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case , _snake_case : Any = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : List[str] = model_class(a_ ) self.assertIsInstance(model.get_input_embeddings(), (nn.Module) ) _snake_case : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a_, nn.Linear ) ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Any = model_class(a_ ) _snake_case : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : List[Any] = [*signature.parameters.keys()] _snake_case : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1], a_ ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' if not self.model_tester.is_training: return _snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Any = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(a_ ), BeitForMaskedImageModeling]: continue _snake_case : List[Any] = model_class(a_ ) model.to(a_ ) model.train() _snake_case : Dict = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : List[Any] = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return _snake_case : Dict = False _snake_case : Optional[Any] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(a_ ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue _snake_case : Any = model_class(a_ ) model.gradient_checkpointing_enable() model.to(a_ ) model.train() _snake_case : Any = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : int = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : int = _config_zero_init(a_ ) for model_class in self.all_model_classes: _snake_case : Tuple = model_class(config=a_ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems 
not properly initialized", ) @slow def UpperCamelCase_ ( self: int ): '''simple docstring''' for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : Optional[int] = BeitModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowercase( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self: Dict ): '''simple docstring''' return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None @slow def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : str = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(a_ ) _snake_case : Dict = self.default_image_processor _snake_case : Dict = prepare_img() _snake_case : List[str] = image_processor(images=a_, return_tensors="""pt""" ).pixel_values.to(a_ ) # prepare bool_masked_pos _snake_case : Optional[int] = torch.ones((1, 196), dtype=torch.bool ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : int = model(pixel_values=a_, bool_masked_pos=a_ ) _snake_case : Dict = outputs.logits # verify the logits _snake_case : Optional[int] = torch.Size((1, 196, 8_192) ) self.assertEqual(logits.shape, a_ ) _snake_case : Optional[Any] = torch.tensor( [[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(a_ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], a_, atol=1E-2 ) ) @slow def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Dict = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(a_ ) _snake_case : List[Any] = self.default_image_processor _snake_case : Any = prepare_img() _snake_case : Any = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : int = model(**a_ ) _snake_case : Optional[int] = outputs.logits # verify the logits _snake_case : Tuple = torch.Size((1, 1_000) ) self.assertEqual(logits.shape, a_ ) _snake_case : Any = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(a_ ) self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) ) _snake_case : str = 281 self.assertEqual(logits.argmax(-1 ).item(), a_ ) @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : int = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to( a_ ) _snake_case : int = self.default_image_processor _snake_case : Optional[Any] = prepare_img() _snake_case : Union[str, Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Union[str, Any] = model(**a_ ) _snake_case : Dict = outputs.logits # verify the logits _snake_case : Tuple = torch.Size((1, 21_841) ) self.assertEqual(logits.shape, a_ ) _snake_case : Optional[int] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(a_ ) self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) ) _snake_case : List[str] = 2_396 self.assertEqual(logits.argmax(-1 ).item(), a_ ) @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : List[str] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" ) _snake_case : int = model.to(a_ ) _snake_case : List[str] = BeitImageProcessor(do_resize=a_, size=640, 
do_center_crop=a_ ) _snake_case : Optional[int] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" ) _snake_case : Union[str, Any] = Image.open(ds[0]["""file"""] ) _snake_case : List[Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Optional[int] = model(**a_ ) _snake_case : Union[str, Any] = outputs.logits # verify the logits _snake_case : List[str] = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape, a_ ) _snake_case : Optional[int] = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" ) if is_pillow_less_than_a: _snake_case : Any = torch.tensor( [ [[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]], [[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]], [[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]], ], device=a_, ) else: _snake_case : Optional[Any] = torch.tensor( [ [[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]], [[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]], [[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]], ], device=a_, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], a_, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : int = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" ) _snake_case : List[Any] = model.to(a_ ) _snake_case : Tuple = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ ) _snake_case : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" ) _snake_case : str = Image.open(ds[0]["""file"""] ) _snake_case : Tuple = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Optional[int] = model(**a_ ) _snake_case : Union[str, Any] = outputs.logits.detach().cpu() _snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=a_, target_sizes=[(500, 300)] ) _snake_case : Optional[int] = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape, a_ ) _snake_case : List[str] = image_processor.post_process_semantic_segmentation(outputs=a_ ) _snake_case : List[str] = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape, a_ )
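# Quick check of the sequence-length bookkeeping used by BeitModelTester
# above (illustrative numbers, not from the original source): BEiT's sequence
# length is the patch count plus one [CLS] token,
# i.e. (image_size // patch_size) ** 2 + 1.
image_size, patch_size = 224, 16
assert (image_size // patch_size) ** 2 + 1 == 197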
"""simple docstring""" import glob import os import random from string import ascii_lowercase, digits import cva A_ = '''''' A_ = '''''' A_ = '''''' A_ = 1 # (0 is vertical, 1 is horizontal) def UpperCAmelCase__ (): """simple docstring""" _snake_case , _snake_case : Any = get_dataset(snake_case__ , snake_case__ ) print("""Processing...""" ) _snake_case , _snake_case , _snake_case : List[Any] = update_image_and_anno(snake_case__ , snake_case__ , snake_case__ ) for index, image in enumerate(snake_case__ ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' _snake_case : Optional[Any] = random_chars(32 ) _snake_case : Tuple = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0] _snake_case : Dict = F"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}" cva.imwrite(F"/{file_root}.jpg" , snake_case__ , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(F"Success {index+1}/{len(snake_case__ )} with {file_name}" ) _snake_case : Optional[int] = [] for anno in new_annos[index]: _snake_case : str = F"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}" annos_list.append(snake_case__ ) with open(F"/{file_root}.txt" , """w""" ) as outfile: outfile.write("""\n""".join(line for line in annos_list ) ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str ): """simple docstring""" _snake_case : List[str] = [] _snake_case : List[str] = [] for label_file in glob.glob(os.path.join(snake_case__ , """*.txt""" ) ): _snake_case : int = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0] with open(snake_case__ ) as in_file: _snake_case : Union[str, Any] = in_file.readlines() _snake_case : int = os.path.join(snake_case__ , F"{label_name}.jpg" ) _snake_case : str = [] for obj_list in obj_lists: _snake_case : Dict = obj_list.rstrip("""\n""" ).split(""" """ ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(snake_case__ ) labels.append(snake_case__ ) return img_paths, labels def UpperCAmelCase__ (snake_case__ : list , snake_case__ : list , snake_case__ : int = 1 ): """simple docstring""" _snake_case : Dict = [] _snake_case : Any = [] _snake_case : List[Any] = [] for idx in range(len(snake_case__ ) ): _snake_case : Optional[int] = [] _snake_case : Any = img_list[idx] path_list.append(snake_case__ ) _snake_case : Tuple = anno_list[idx] _snake_case : Any = cva.imread(snake_case__ ) if flip_type == 1: _snake_case : Optional[int] = cva.flip(snake_case__ , snake_case__ ) for bbox in img_annos: _snake_case : List[str] = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: _snake_case : List[Any] = cva.flip(snake_case__ , snake_case__ ) for bbox in img_annos: _snake_case : int = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(snake_case__ ) new_imgs_list.append(snake_case__ ) return new_imgs_list, new_annos_lists, path_list def UpperCAmelCase__ (snake_case__ : int = 32 ): """simple docstring""" assert number_char > 1, "The number of character should greater than 1" _snake_case : Dict = ascii_lowercase + digits return "".join(random.choice(snake_case__ ) for _ in range(snake_case__ ) ) if __name__ == "__main__": main() print('''DONE ✅''')
"""simple docstring""" import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class lowercase( __a ): '''simple docstring''' lowercase__ = (IPNDMScheduler,) lowercase__ = (("num_inference_steps", 50),) def UpperCamelCase_ ( self: Union[str, Any], **a_: Union[str, Any] ): '''simple docstring''' _snake_case : List[Any] = {"""num_train_timesteps""": 1_000} config.update(**a_ ) return config def UpperCamelCase_ ( self: Tuple, a_: Optional[int]=0, **a_: int ): '''simple docstring''' _snake_case : Optional[int] = dict(self.forward_default_kwargs ) _snake_case : Optional[Any] = kwargs.pop("""num_inference_steps""", a_ ) _snake_case : Optional[Any] = self.dummy_sample _snake_case : Dict = 0.1 * sample _snake_case : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: _snake_case : int = self.get_scheduler_config(**a_ ) _snake_case : Dict = scheduler_class(**a_ ) scheduler.set_timesteps(a_ ) # copy over dummy past residuals _snake_case : int = dummy_past_residuals[:] if time_step is None: _snake_case : Union[str, Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a_ ) _snake_case : Tuple = scheduler_class.from_pretrained(a_ ) new_scheduler.set_timesteps(a_ ) # copy over dummy past residuals _snake_case : Optional[Any] = dummy_past_residuals[:] _snake_case : List[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : str = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _snake_case : Optional[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : Optional[int] = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' pass def UpperCamelCase_ ( self: str, a_: Any=0, **a_: Tuple ): '''simple docstring''' _snake_case : str = dict(self.forward_default_kwargs ) _snake_case : List[Any] = kwargs.pop("""num_inference_steps""", a_ ) _snake_case : Optional[int] = self.dummy_sample _snake_case : Tuple = 0.1 * sample _snake_case : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: _snake_case : Any = self.get_scheduler_config() _snake_case : Tuple = scheduler_class(**a_ ) scheduler.set_timesteps(a_ ) # copy over dummy past residuals (must be after setting timesteps) _snake_case : Union[str, Any] = dummy_past_residuals[:] if time_step is None: _snake_case : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a_ ) _snake_case : List[str] = scheduler_class.from_pretrained(a_ ) # copy over dummy past residuals new_scheduler.set_timesteps(a_ ) # copy over dummy past residual (must be after setting timesteps) _snake_case : List[str] = dummy_past_residuals[:] _snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : Any = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : int = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, 
"Scheduler outputs are not identical" def UpperCamelCase_ ( self: List[Any], **a_: Optional[int] ): '''simple docstring''' _snake_case : Union[str, Any] = self.scheduler_classes[0] _snake_case : Any = self.get_scheduler_config(**a_ ) _snake_case : List[Any] = scheduler_class(**a_ ) _snake_case : Union[str, Any] = 10 _snake_case : Union[str, Any] = self.dummy_model() _snake_case : List[Any] = self.dummy_sample_deter scheduler.set_timesteps(a_ ) for i, t in enumerate(scheduler.timesteps ): _snake_case : Optional[Any] = model(a_, a_ ) _snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample for i, t in enumerate(scheduler.timesteps ): _snake_case : Union[str, Any] = model(a_, a_ ) _snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample return sample def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : str = dict(self.forward_default_kwargs ) _snake_case : int = kwargs.pop("""num_inference_steps""", a_ ) for scheduler_class in self.scheduler_classes: _snake_case : Union[str, Any] = self.get_scheduler_config() _snake_case : Tuple = scheduler_class(**a_ ) _snake_case : Dict = self.dummy_sample _snake_case : List[str] = 0.1 * sample if num_inference_steps is not None and hasattr(a_, """set_timesteps""" ): scheduler.set_timesteps(a_ ) elif num_inference_steps is not None and not hasattr(a_, """set_timesteps""" ): _snake_case : Dict = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _snake_case : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] _snake_case : List[str] = dummy_past_residuals[:] _snake_case : Optional[int] = scheduler.timesteps[5] _snake_case : Optional[Any] = scheduler.timesteps[6] _snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : List[str] = scheduler.step(a_, a_, a_, **a_ ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) _snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' for timesteps in [100, 1_000]: self.check_over_configs(num_train_timesteps=a_, time_step=a_ ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ): self.check_over_forward(num_inference_steps=a_, time_step=a_ ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Optional[int] = self.full_loop() _snake_case : Optional[int] = torch.mean(torch.abs(a_ ) ) assert abs(result_mean.item() - 2_540_529 ) < 10
28
1
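The round-trip checks above compare scheduler.step outputs before and after a save_config/from_pretrained cycle. A minimal sketch of the denoising loop being exercised, assuming the public diffusers API; the random tensors are stand-ins for a real UNet's latents and outputs:

import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1_000)
scheduler.set_timesteps(50)

sample = torch.randn(1, 3, 8, 8)  # stand-in latent
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(model_output, t, sample).prev_sample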
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING A_ = logging.get_logger(__name__) A_ = { '''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class lowercase( __a ): '''simple docstring''' lowercase__ = "deformable_detr" lowercase__ = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self: Tuple, a_: List[str]=True, a_: str=None, a_: Tuple=3, a_: Optional[int]=300, a_: List[Any]=1_024, a_: Union[str, Any]=6, a_: int=1_024, a_: Optional[int]=8, a_: Tuple=6, a_: List[str]=1_024, a_: Dict=8, a_: str=0.0, a_: Dict=True, a_: Any="relu", a_: List[str]=256, a_: List[Any]=0.1, a_: List[Any]=0.0, a_: Optional[int]=0.0, a_: Tuple=0.02, a_: List[Any]=1.0, a_: Union[str, Any]=True, a_: Dict=False, a_: List[str]="sine", a_: Optional[int]="resnet50", a_: List[str]=True, a_: Optional[Any]=False, a_: Tuple=4, a_: List[str]=4, a_: Tuple=4, a_: List[Any]=False, a_: List[Any]=300, a_: Dict=False, a_: Optional[int]=1, a_: int=5, a_: Union[str, Any]=2, a_: Optional[int]=1, a_: Tuple=1, a_: Tuple=5, a_: int=2, a_: Tuple=0.1, a_: Dict=0.25, a_: str=False, **a_: Dict, ): '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) _snake_case : str = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(a_, a_ ): _snake_case : List[Any] = backbone_config.get("""model_type""" ) _snake_case : int = CONFIG_MAPPING[backbone_model_type] _snake_case : Optional[Any] = config_class.from_dict(a_ ) _snake_case : Any = use_timm_backbone _snake_case : List[str] = backbone_config _snake_case : int = num_channels _snake_case : Any = num_queries _snake_case : str = max_position_embeddings _snake_case : int = d_model _snake_case : Optional[int] = encoder_ffn_dim _snake_case : List[str] = encoder_layers _snake_case : int = encoder_attention_heads _snake_case : str = decoder_ffn_dim _snake_case : int = decoder_layers _snake_case : List[Any] = decoder_attention_heads _snake_case : int = dropout _snake_case : Tuple = attention_dropout _snake_case : int = activation_dropout _snake_case : List[str] = activation_function _snake_case : List[Any] = init_std _snake_case : int = init_xavier_std _snake_case : Optional[int] = encoder_layerdrop _snake_case : int = auxiliary_loss _snake_case : Any = position_embedding_type _snake_case : Optional[Any] = backbone _snake_case : Tuple = use_pretrained_backbone _snake_case : int = dilation # deformable attributes _snake_case : str = num_feature_levels _snake_case : str = encoder_n_points _snake_case : Optional[int] = decoder_n_points _snake_case : str = two_stage _snake_case : Optional[int] = two_stage_num_proposals _snake_case : Dict = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError("""If two_stage is True, with_box_refine must be True.""" ) # Hungarian matcher _snake_case : Tuple = class_cost _snake_case : List[Any] = bbox_cost _snake_case : List[str] = giou_cost # Loss coefficients _snake_case : List[Any] = mask_loss_coefficient _snake_case : List[str] = dice_loss_coefficient 
_snake_case : Any = bbox_loss_coefficient _snake_case : Dict = giou_loss_coefficient _snake_case : Any = eos_coefficient _snake_case : str = focal_alpha _snake_case : int = disable_custom_kernels super().__init__(is_encoder_decoder=a_, **a_ ) @property def UpperCamelCase_ ( self: Any ): '''simple docstring''' return self.encoder_attention_heads @property def UpperCamelCase_ ( self: int ): '''simple docstring''' return self.d_model def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : Any = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: _snake_case : Dict = self.backbone_config.to_dict() _snake_case : int = self.__class__.model_type return output
28
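A short sketch of instantiating the configuration defined above and reading the aliased attributes; this assumes the released transformers API, where the class is exported as DeformableDetrConfig:

from transformers import DeformableDetrConfig

config = DeformableDetrConfig(num_queries=300, two_stage=False)
print(config.model_type)           # deformable_detr
print(config.hidden_size)          # alias of d_model via the attribute map
print(config.num_attention_heads)  # alias of encoder_attention_heads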
"""simple docstring""" from __future__ import annotations import math def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True A_ = [num for num in range(3, 10_00_01, 2) if not is_prime(num)] def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ): raise ValueError("""n must be an integer""" ) if n <= 0: raise ValueError("""n must be >= 0""" ) _snake_case : Any = [] for num in range(len(snake_case__ ) ): _snake_case : Optional[int] = 0 while 2 * i * i <= odd_composites[num]: _snake_case : Optional[int] = odd_composites[num] - 2 * i * i if is_prime(snake_case__ ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(snake_case__ ) == n: return list_nums return [] def UpperCAmelCase__ (): """simple docstring""" return compute_nums(1 )[0] if __name__ == "__main__": print(F'''{solution() = }''')
28
1
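The search above implements Project Euler 46 (Goldbach's other conjecture): find the smallest odd composite that cannot be written as a prime plus twice a square. A quick demonstration using the helpers defined above; 5777 and 5993 are the published counterexamples:

assert is_prime(13) and not is_prime(5_777)  # 5777 = 53 * 109
assert all(not is_prime(n) and n % 2 == 1 for n in odd_composites[:5])
print(compute_nums(2))  # [5777, 5993] -- the first two counterexamples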
"""simple docstring""" import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = GPTSanJapaneseTokenizer lowercase__ = False lowercase__ = {"do_clean_text": False, "add_prefix_space": False} def UpperCamelCase_ ( self: Dict ): '''simple docstring''' super().setUp() # fmt: off _snake_case : Optional[int] = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""] # fmt: on _snake_case : Tuple = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀 _snake_case : Tuple = {"""unk_token""": """<unk>"""} _snake_case : Optional[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] ) _snake_case : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""emoji_file"""] ) with open(self.vocab_file, """w""", encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) with open(self.emoji_file, """w""" ) as emoji_writer: emoji_writer.write(json.dumps(a_ ) ) def UpperCamelCase_ ( self: Dict, **a_: List[Any] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **a_ ) def UpperCamelCase_ ( self: int, a_: Any ): '''simple docstring''' _snake_case : Optional[Any] = """こんにちは、世界。 \nこんばんは、㔺界。😀""" _snake_case : Optional[int] = """こんにちは、世界。 \nこんばんは、世界。😀""" return input_text, output_text def UpperCamelCase_ ( self: Any, a_: Optional[Any] ): '''simple docstring''' _snake_case , _snake_case : Any = self.get_input_output_texts(a_ ) _snake_case : Tuple = tokenizer.encode(a_, add_special_tokens=a_ ) _snake_case : str = tokenizer.decode(a_, clean_up_tokenization_spaces=a_ ) return text, ids def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' pass # TODO add if relevant def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' pass # TODO add if relevant def UpperCamelCase_ ( self: str ): '''simple docstring''' pass # TODO add if relevant def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Tuple = self.get_tokenizer() # Testing tokenization _snake_case : Tuple = """こんにちは、世界。 こんばんは、㔺界。""" _snake_case : Union[str, Any] = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""] _snake_case : List[Any] = tokenizer.tokenize(a_ ) self.assertListEqual(a_, a_ ) # Testing conversion to ids without special tokens _snake_case : Optional[int] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] _snake_case : Union[str, Any] = tokenizer.convert_tokens_to_ids(a_ ) self.assertListEqual(a_, a_ ) # Testing conversion to ids with special tokens _snake_case : Any = tokens + [tokenizer.unk_token] _snake_case : List[str] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] _snake_case : List[Any] = tokenizer.convert_tokens_to_ids(a_ ) self.assertListEqual(a_, a_ ) def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : Tuple = self.get_tokenizer() # Testing tokenization 
_snake_case : Optional[int] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。""" _snake_case : List[str] = """こんにちは、、、、世界。こんばんは、、、、世界。""" _snake_case : Optional[Any] = tokenizer.encode(a_ ) _snake_case : Tuple = tokenizer.decode(a_ ) self.assertEqual(a_, a_ ) @slow def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Dict = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) # Testing tokenization _snake_case : Optional[Any] = """こんにちは、世界。""" _snake_case : List[Any] = """こんばんは、㔺界。😀""" _snake_case : Tuple = """こんにちは、世界。こんばんは、世界。😀""" _snake_case : str = tokenizer.encode(prefix_text + input_text ) _snake_case : Optional[Any] = tokenizer.encode("""""", prefix_text=prefix_text + input_text ) _snake_case : Optional[int] = tokenizer.encode(a_, prefix_text=a_ ) _snake_case : List[Any] = tokenizer.decode(a_ ) _snake_case : str = tokenizer.decode(a_ ) _snake_case : Union[str, Any] = tokenizer.decode(a_ ) self.assertEqual(a_, a_ ) self.assertEqual(a_, a_ ) self.assertEqual(a_, a_ ) @slow def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : str = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) # Testing tokenization _snake_case : Union[str, Any] = """こんにちは、世界。""" _snake_case : Optional[Any] = """こんばんは、㔺界。😀""" _snake_case : Dict = len(tokenizer.encode(a_ ) ) - 2 _snake_case : List[Any] = len(tokenizer.encode(a_ ) ) - 2 _snake_case : Tuple = [1] + [0] * (len_prefix + len_text + 1) _snake_case : Dict = [1] * (len_prefix + len_text + 1) + [0] _snake_case : List[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1) _snake_case : List[Any] = tokenizer(prefix_text + input_text ).token_type_ids _snake_case : Optional[int] = tokenizer("""""", prefix_text=prefix_text + input_text ).token_type_ids _snake_case : Optional[Any] = tokenizer(a_, prefix_text=a_ ).token_type_ids self.assertListEqual(a_, a_ ) self.assertListEqual(a_, a_ ) self.assertListEqual(a_, a_ ) @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) _snake_case : int = tokenizer.encode("""あンいワ""" ) _snake_case : Optional[int] = tokenizer.encode("""""", prefix_text="""あンいワ""" ) _snake_case : Union[str, Any] = tokenizer.encode("""いワ""", prefix_text="""あン""" ) self.assertEqual(tokenizer.decode(a_ ), tokenizer.decode(a_ ) ) self.assertEqual(tokenizer.decode(a_ ), tokenizer.decode(a_ ) ) self.assertNotEqual(a_, a_ ) self.assertNotEqual(a_, a_ ) self.assertEqual(x_token_a[1], x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1], x_token_a[3] ) # SEG token @slow def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Dict = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) _snake_case : List[str] = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]] _snake_case : Dict = tokenizer(a_, padding=a_ ) _snake_case : List[Any] = tokenizer.batch_encode_plus(a_, padding=a_ ) # fmt: off _snake_case : Any = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]] _snake_case : Union[str, Any] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] _snake_case : Tuple = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids, a_ ) self.assertListEqual(x_token.token_type_ids, a_ ) self.assertListEqual(x_token.attention_mask, a_ ) self.assertListEqual(x_token_a.input_ids, a_ ) 
self.assertListEqual(x_token_a.token_type_ids, a_ ) self.assertListEqual(x_token_a.attention_mask, a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' pass def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass
28
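A usage sketch of the prefix/input split those tests verify, assuming the Tanrei/GPTSAN-japanese checkpoint is reachable over the network; per the token_type_ids assertions above, prefix positions are marked 1 and input positions 0:

from transformers import GPTSanJapaneseTokenizer

tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
enc = tokenizer("こんばんは、世界。", prefix_text="こんにちは、世界。")
print(enc.input_ids)
print(enc.token_type_ids)  # 1 for prefix positions, 0 for input positions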
"""simple docstring""" import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class lowercase: '''simple docstring''' def __init__( self: List[Any], a_: str = "cpu", a_: str = "openai/clip-vit-large-patch14" ): '''simple docstring''' _snake_case : Optional[int] = device _snake_case : str = CLIPTokenizerFast.from_pretrained(a_ ) _snake_case : Union[str, Any] = [0.48_145_466, 0.4_578_275, 0.40_821_073] _snake_case : Optional[int] = [0.26_862_954, 0.26_130_258, 0.27_577_711] _snake_case : str = torchvision.transforms.Normalize(self.image_mean, self.image_std ) _snake_case : Optional[int] = torchvision.transforms.Resize(224 ) _snake_case : str = torchvision.transforms.CenterCrop(224 ) def UpperCamelCase_ ( self: List[str], a_: str ): '''simple docstring''' _snake_case : Optional[int] = self.resize(a_ ) _snake_case : List[Any] = self.center_crop(a_ ) _snake_case : Optional[Any] = self.normalize(a_ ) return images def __call__( self: Any, a_: Optional[int]=None, a_: str=None, **a_: str ): '''simple docstring''' _snake_case : Optional[int] = self.tokenizer(text=a_, **a_ ) _snake_case : Any = self.preprocess_img(a_ ) _snake_case : Union[str, Any] = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class lowercase( nn.Module ): '''simple docstring''' def __init__( self: List[Any], a_: List[Any]=10, a_: Optional[Any]=0.01, a_: List[str]=None, a_: str=None, a_: Any=None, a_: Tuple=None, a_: List[str]=None, a_: List[str]=None, a_: str=False, a_: List[str]=True, a_: Any="image", a_: Optional[Any]=True, a_: Dict=False, a_: List[str]=False, a_: Optional[int]=False, ): '''simple docstring''' super().__init__() _snake_case : int = None _snake_case : List[str] = device if device else get_device() if vqgan: _snake_case : Any = vqgan else: _snake_case : Optional[Any] = load_vqgan(self.device, conf_path=a_, ckpt_path=a_ ) self.vqgan.eval() if clip: _snake_case : Tuple = clip else: _snake_case : Optional[Any] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" ) self.clip.to(self.device ) _snake_case : List[str] = ProcessorGradientFlow(device=self.device ) _snake_case : Union[str, Any] = iterations _snake_case : Dict = lr _snake_case : Optional[int] = log _snake_case : List[str] = make_grid _snake_case : Union[str, Any] = return_val _snake_case : List[str] = quantize _snake_case : List[str] = self.vqgan.decoder.z_shape def UpperCamelCase_ ( self: Tuple, a_: str=None, a_: Dict=None, a_: Dict=5, a_: Dict=True ): '''simple docstring''' _snake_case : Dict = [] if output_path is None: _snake_case : Tuple = """./animation.gif""" if input_path is None: _snake_case : Any = self.save_path _snake_case : Optional[int] = sorted(glob(input_path + """/*""" ) ) if not len(a_ ): raise ValueError( """No images found in save path, aborting (did you pass save_intermediate=True to the generate""" """ function?)""" ) if len(a_ ) == 1: print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" ) _snake_case : List[Any] = total_duration / len(a_ ) _snake_case : Optional[Any] = [frame_duration] * len(a_ ) if extend_frames: _snake_case : Optional[int] = 1.5 _snake_case : int = 3 for file_name in paths: if file_name.endswith(""".png""" ): 
images.append(imageio.imread(a_ ) ) imageio.mimsave(a_, a_, duration=a_ ) print(f"gif saved to {output_path}" ) def UpperCamelCase_ ( self: str, a_: Tuple=None, a_: Optional[Any]=None ): '''simple docstring''' if not (path or img): raise ValueError("""Input either path or tensor""" ) if img is not None: raise NotImplementedError _snake_case : int = preprocess(Image.open(a_ ), target_image_size=256 ).to(self.device ) _snake_case : int = preprocess_vqgan(a_ ) _snake_case , *_snake_case : List[Any] = self.vqgan.encode(a_ ) return z def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any] ): '''simple docstring''' _snake_case : Optional[int] = self.latent.detach().requires_grad_() _snake_case : Tuple = base_latent + transform_vector if self.quantize: _snake_case , *_snake_case : Any = self.vqgan.quantize(a_ ) else: _snake_case : List[Any] = trans_latent return self.vqgan.decode(a_ ) def UpperCamelCase_ ( self: List[Any], a_: Any, a_: Union[str, Any], a_: Dict=None ): '''simple docstring''' _snake_case : Tuple = self.clip_preprocessor(text=a_, images=a_, return_tensors="""pt""", padding=a_ ) _snake_case : Any = self.clip(**a_ ) _snake_case : str = clip_outputs.logits_per_image if weights is not None: _snake_case : Any = similarity_logits * weights return similarity_logits.sum() def UpperCamelCase_ ( self: Any, a_: Any, a_: List[str], a_: Dict ): '''simple docstring''' _snake_case : List[Any] = self._get_clip_similarity(pos_prompts["""prompts"""], a_, weights=(1 / pos_prompts["""weights"""]) ) if neg_prompts: _snake_case : List[str] = self._get_clip_similarity(neg_prompts["""prompts"""], a_, weights=neg_prompts["""weights"""] ) else: _snake_case : Tuple = torch.tensor([1], device=self.device ) _snake_case : int = -torch.log(a_ ) + torch.log(a_ ) return loss def UpperCamelCase_ ( self: Optional[Any], a_: Tuple, a_: Union[str, Any], a_: List[str] ): '''simple docstring''' _snake_case : Tuple = torch.randn_like(self.latent, requires_grad=a_, device=self.device ) _snake_case : Dict = torch.optim.Adam([vector], lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() _snake_case : str = self._add_vector(a_ ) _snake_case : List[Any] = loop_post_process(a_ ) _snake_case : List[Any] = self._get_CLIP_loss(a_, a_, a_ ) print("""CLIP loss""", a_ ) if self.log: wandb.log({"""CLIP Loss""": clip_loss} ) clip_loss.backward(retain_graph=a_ ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def UpperCamelCase_ ( self: int, a_: Any, a_: Union[str, Any], a_: Optional[int] ): '''simple docstring''' wandb.init(reinit=a_, project="""face-editor""" ) wandb.config.update({"""Positive Prompts""": positive_prompts} ) wandb.config.update({"""Negative Prompts""": negative_prompts} ) wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} ) if image_path: _snake_case : Any = Image.open(a_ ) _snake_case : str = image.resize((256, 256) ) wandb.log("""Original Image""", wandb.Image(a_ ) ) def UpperCamelCase_ ( self: str, a_: List[Any] ): '''simple docstring''' if not prompts: return [] _snake_case : List[str] = [] _snake_case : Tuple = [] if isinstance(a_, a_ ): _snake_case : Union[str, Any] = [prompt.strip() for prompt in prompts.split("""|""" )] for prompt in prompts: if isinstance(a_, (tuple, list) ): _snake_case : List[Any] = prompt[0] _snake_case : Optional[Any] = float(prompt[1] ) elif ":" in prompt: _snake_case , _snake_case : List[Any] = prompt.split(""":""" ) _snake_case : str = float(a_ ) else: _snake_case : int = prompt 
_snake_case : Union[str, Any] = 1.0 processed_prompts.append(a_ ) weights.append(a_ ) return { "prompts": processed_prompts, "weights": torch.tensor(a_, device=self.device ), } def UpperCamelCase_ ( self: Dict, a_: List[Any], a_: List[Any]=None, a_: Optional[Any]=None, a_: Optional[Any]=True, a_: Dict=False, a_: Optional[Any]=True, a_: Optional[Any]=True, a_: Any=None, ): '''simple docstring''' if image_path: _snake_case : Union[str, Any] = self._get_latent(a_ ) else: _snake_case : Any = torch.randn(self.latent_dim, device=self.device ) if self.log: self._init_logging(a_, a_, a_ ) assert pos_prompts, "You must provide at least one positive prompt." _snake_case : str = self.process_prompts(a_ ) _snake_case : Dict = self.process_prompts(a_ ) if save_final and save_path is None: _snake_case : Any = os.path.join("""./outputs/""", """_""".join(pos_prompts["""prompts"""] ) ) if not os.path.exists(a_ ): os.makedirs(a_ ) else: _snake_case : List[Any] = save_path + """_""" + get_timestamp() os.makedirs(a_ ) _snake_case : Optional[Any] = save_path _snake_case : List[Any] = self.vqgan.decode(self.latent )[0] if show_intermediate: print("""Original Image""" ) show_pil(custom_to_pil(a_ ) ) _snake_case : List[Any] = loop_post_process(a_ ) for iter, transformed_img in enumerate(self._optimize_CLIP(a_, a_, a_ ) ): if show_intermediate: show_pil(a_ ) if save_intermediate: transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png" ) ) if self.log: wandb.log({"""Image""": wandb.Image(a_ )} ) if show_final: show_pil(a_ ) if save_final: transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png" ) )
28
1
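The prompt parser above accepts prompts separated by "|", each optionally carrying a ":weight" suffix. A standalone sketch of that convention (simplified: it keeps weights as plain floats rather than a torch tensor, and uses rsplit so prompt text may itself contain colons):

def parse_prompts(prompts: str) -> dict:
    processed, weights = [], []
    for raw in prompts.split("|"):
        raw = raw.strip()
        if ":" in raw:
            text, weight = raw.rsplit(":", 1)
            processed.append(text)
            weights.append(float(weight))
        else:
            processed.append(raw)
            weights.append(1.0)
    return {"prompts": processed, "weights": weights}

print(parse_prompts("a smiling face:1.0|wearing glasses:0.5"))
# {'prompts': ['a smiling face', 'wearing glasses'], 'weights': [1.0, 0.5]}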
"""simple docstring""" import argparse import torch from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def UpperCAmelCase__ (snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : Optional[Any] ): """simple docstring""" if openai_config_file == "": _snake_case : int = OpenAIGPTConfig() else: _snake_case : str = OpenAIGPTConfig.from_json_file(snake_case__ ) _snake_case : int = OpenAIGPTModel(snake_case__ ) # Load weights from numpy load_tf_weights_in_openai_gpt(snake_case__ , snake_case__ , snake_case__ ) # Save pytorch-model _snake_case : Any = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME _snake_case : List[str] = pytorch_dump_folder_path + """/""" + CONFIG_NAME print(F"Save PyTorch model to {pytorch_weights_dump_path}" ) torch.save(model.state_dict() , snake_case__ ) print(F"Save configuration file to {pytorch_config_dump_path}" ) with open(snake_case__ , """w""" , encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--openai_checkpoint_folder_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--openai_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained OpenAI model. \n''' '''This specifies the model architecture.''' ), ) A_ = parser.parse_args() convert_openai_checkpoint_to_pytorch( args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path )
28
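For reference, the empty-string branch above falls back to a stock GPT-1 configuration. A quick sketch of what that default contains, using the released transformers API:

from transformers import OpenAIGPTConfig

config = OpenAIGPTConfig()  # the openai_config_file == "" branch
print(config.n_embd, config.n_layer, config.n_head)  # 768 12 12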
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ) or number < 0: raise ValueError("""Input must be a non-negative integer""" ) _snake_case : Dict = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
28
1
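A quick check of the Kernighan trick above: number &= number - 1 clears the lowest set bit, so the loop runs once per set bit rather than once per bit position. Comparing against Python's own binary representation:

for n in (0, 1, 0b1011, 255):
    assert get_set_bits_count(n) == bin(n).count("1")
print(get_set_bits_count(0b1011))  # 3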
"""simple docstring""" from collections.abc import Callable import numpy as np def UpperCAmelCase__ (snake_case__ : Callable , snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : float ): """simple docstring""" _snake_case : Any = int(np.ceil((x_end - xa) / step_size ) ) _snake_case : Any = np.zeros((n + 1,) ) _snake_case : Optional[int] = ya _snake_case : Optional[int] = xa for k in range(snake_case__ ): _snake_case : Optional[int] = y[k] + step_size * ode_func(snake_case__ , y[k] ) _snake_case : Any = y[k] + ( (step_size / 2) * (ode_func(snake_case__ , y[k] ) + ode_func(x + step_size , snake_case__ )) ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
28
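Checking the modified-Euler (Heun) integrator above against a problem with a known closed form, y' = y with y(0) = 1, whose exact solution at x = 1 is e. The method is second order, so halving the step size should cut the error roughly fourfold:

import math

ys = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
print(ys[-1], abs(ys[-1] - math.e))  # ~2.71825, error roughly on the order of 1e-5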
"""simple docstring""" import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase: '''simple docstring''' def __init__( self: List[str], a_: List[Any], a_: str=13, a_: Dict=32, a_: Union[str, Any]=3, a_: Union[str, Any]=4, a_: Tuple=[10, 20, 30, 40], a_: Dict=[2, 2, 3, 2], a_: Tuple=True, a_: Optional[Any]=True, a_: Any=37, a_: Any="gelu", a_: int=10, a_: Tuple=0.02, a_: str=["stage2", "stage3", "stage4"], a_: List[str]=[2, 3, 4], a_: List[str]=None, ): '''simple docstring''' _snake_case : int = parent _snake_case : int = batch_size _snake_case : List[Any] = image_size _snake_case : List[str] = num_channels _snake_case : Tuple = num_stages _snake_case : Union[str, Any] = hidden_sizes _snake_case : List[Any] = depths _snake_case : Tuple = is_training _snake_case : List[str] = use_labels _snake_case : Tuple = intermediate_size _snake_case : List[str] = hidden_act _snake_case : Optional[Any] = num_labels _snake_case : Tuple = initializer_range _snake_case : Tuple = out_features _snake_case : Tuple = out_indices _snake_case : Dict = scope def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : Any = None if self.use_labels: _snake_case : Dict = ids_tensor([self.batch_size], self.num_labels ) _snake_case : Optional[Any] = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return ConvNextVaConfig( num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=a_, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, ) def UpperCamelCase_ ( self: int, a_: Tuple, a_: Any, a_: Dict ): '''simple docstring''' _snake_case : int = ConvNextVaModel(config=a_ ) model.to(a_ ) model.eval() _snake_case : Any = model(a_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def UpperCamelCase_ ( self: Optional[int], a_: List[str], a_: Tuple, a_: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[int] = ConvNextVaForImageClassification(a_ ) model.to(a_ ) model.eval() _snake_case : Optional[int] = model(a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self: Union[str, Any], a_: Tuple, a_: Tuple, a_: Tuple ): '''simple docstring''' _snake_case : List[str] = 
ConvNextVaBackbone(config=a_ ) model.to(a_ ) model.eval() _snake_case : int = model(a_ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ), len(config.out_features ) ) self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] ) # verify backbone works with out_features=None _snake_case : Tuple = None _snake_case : Tuple = ConvNextVaBackbone(config=a_ ) model.to(a_ ) model.eval() _snake_case : List[Any] = model(a_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ), 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ), 1 ) self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Dict = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case : Any = config_and_inputs _snake_case : str = {"""pixel_values""": pixel_values} return config, inputs_dict def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[Any] = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case : List[str] = config_and_inputs _snake_case : Any = {"""pixel_values""": pixel_values, """labels""": labels} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) lowercase__ = ( {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification} if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Tuple = ConvNextVaModelTester(self ) _snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return @unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" ) def UpperCamelCase_ ( self: int ): '''simple docstring''' pass @unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass @unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" ) def UpperCamelCase_ ( self: str ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: _snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_with_labels() _snake_case : List[Any] = True if model_class.__name__ in [ *get_values(a_ ), *get_values(a_ ), ]: continue _snake_case : Tuple = 
model_class(a_ ) model.to(a_ ) model.train() _snake_case : Optional[Any] = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : Any = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: _snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_with_labels() _snake_case : Any = False _snake_case : List[Any] = True if ( model_class.__name__ in [*get_values(a_ ), *get_values(a_ )] or not model_class.supports_gradient_checkpointing ): continue _snake_case : Dict = model_class(a_ ) model.to(a_ ) model.gradient_checkpointing_enable() model.train() _snake_case : str = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : Optional[int] = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : List[str] = model_class(a_ ) _snake_case : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : int = [*signature.parameters.keys()] _snake_case : Union[str, Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1], a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' def check_hidden_states_output(a_: str, a_: Tuple, a_: Tuple ): _snake_case : Optional[Any] = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): _snake_case : Any = model(**self._prepare_for_class(a_, a_ ) ) _snake_case : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case : Optional[int] = self.model_tester.num_stages self.assertEqual(len(a_ ), expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) _snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Optional[Any] = True check_hidden_states_output(a_, a_, a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case : List[str] = True check_hidden_states_output(a_, a_, a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : str = ConvNextVaModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowercase( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None @slow def 
UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(a_ ) _snake_case : Union[str, Any] = self.default_image_processor _snake_case : List[Any] = prepare_img() _snake_case : Optional[int] = preprocessor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Optional[int] = model(**a_ ) # verify the logits _snake_case : Optional[int] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : Optional[int] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3], a_, atol=1E-4 ) )
28
1
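An inference sketch for the checkpoint tested above. Note the test file spells the classes "ConvNextVa*"; the released transformers names are "ConvNextV2*", so treat that mapping as an assumption of this sketch, along with network access to the checkpoint:

import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")

image = Image.open("tests/fixtures/tests_samples/COCO/000000039769.png")
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(logits.shape)  # torch.Size([1, 1000])
print(model.config.id2label[logits.argmax(-1).item()])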
"""simple docstring""" A_ = { '''A''': ['''B''', '''C''', '''E'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F''', '''G'''], '''D''': ['''B'''], '''E''': ['''A''', '''B''', '''D'''], '''F''': ['''C'''], '''G''': ['''C'''], } def UpperCAmelCase__ (snake_case__ : dict , snake_case__ : str , snake_case__ : Any ): """simple docstring""" _snake_case : Dict = set() # keep track of all the paths to be checked _snake_case : str = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue _snake_case : Union[str, Any] = queue.pop(0 ) # get the last node from the path _snake_case : int = path[-1] if node not in explored: _snake_case : Optional[int] = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: _snake_case : int = list(snake_case__ ) new_path.append(snake_case__ ) queue.append(snake_case__ ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(snake_case__ ) # in case there's no path between the 2 nodes return [] def UpperCAmelCase__ (snake_case__ : dict , snake_case__ : List[Any] , snake_case__ : Union[str, Any] ): """simple docstring""" if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 _snake_case : int = [start] _snake_case : Optional[int] = set(snake_case__ ) # Keep tab on distances from `start` node. _snake_case : Optional[Any] = {start: 0, target: -1} while queue: _snake_case : Dict = queue.pop(0 ) if node == target: _snake_case : List[str] = ( dist[node] if dist[target] == -1 else min(dist[target] , dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(snake_case__ ) queue.append(snake_case__ ) _snake_case : List[Any] = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
28
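A design note on the queue above: list.pop(0) shifts every remaining element, so each dequeue costs O(n), while collections.deque.popleft() is O(1). A sketch of the same search with that swap; it also marks nodes explored at enqueue time, which avoids queuing duplicate paths:

from collections import deque

def bfs_path(graph: dict, start, goal) -> list:
    if start == goal:
        return [start]
    explored, queue = {start}, deque([[start]])
    while queue:
        path = queue.popleft()
        for neighbour in graph[path[-1]]:
            if neighbour == goal:
                return path + [neighbour]
            if neighbour not in explored:
                explored.add(neighbour)
                queue.append(path + [neighbour])
    return []

print(bfs_path(demo_graph, "G", "D"))  # ['G', 'C', 'A', 'B', 'D']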
"""simple docstring""" import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Dict ): """simple docstring""" assert isinstance(snake_case__ , snake_case__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict ): """simple docstring""" _snake_case : str = tmp_path / """cache""" _snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _snake_case : str = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : int , snake_case__ : List[Any] ): """simple docstring""" _snake_case : str = tmp_path / """cache""" _snake_case : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : List[Any] = features.copy() if features else default_expected_features _snake_case : List[Any] = ( Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None ) _snake_case : Optional[Any] = ParquetDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : int ): """simple docstring""" _snake_case : List[str] = tmp_path / """cache""" _snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : int = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : str , snake_case__ : str ): """simple docstring""" if issubclass(snake_case__ , snake_case__ ): _snake_case : Optional[Any] = parquet_path elif issubclass(snake_case__ , snake_case__ ): _snake_case : int = [parquet_path] _snake_case : Union[str, Any] = tmp_path / """cache""" _snake_case : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : List[str] = 
ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : str=("train",) ): """simple docstring""" assert isinstance(snake_case__ , snake_case__ ) for split in splits: _snake_case : int = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : List[Any] ): """simple docstring""" _snake_case : Tuple = tmp_path / """cache""" _snake_case : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _snake_case : Tuple = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] ): """simple docstring""" _snake_case : Optional[int] = tmp_path / """cache""" _snake_case : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : Optional[Any] = features.copy() if features else default_expected_features _snake_case : Dict = ( Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None ) _snake_case : Optional[int] = ParquetDatasetReader({"""train""": parquet_path} , features=snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Tuple ): """simple docstring""" if split: _snake_case : int = {split: parquet_path} else: _snake_case : Optional[Any] = """train""" _snake_case : int = {"""train""": parquet_path, """test""": parquet_path} _snake_case : Dict = tmp_path / """cache""" _snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : Union[str, Any] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Tuple ): """simple docstring""" _snake_case : List[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _snake_case : str = pq.ParquetFile(tmp_path / """foo.parquet""" ) _snake_case : int = pf.read() assert dataset.data.table == output_table def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ): """simple docstring""" 
_snake_case : Optional[Any] = str(shared_datadir / """test_image_rgb.jpg""" ) _snake_case : Tuple = {"""image""": [image_path]} _snake_case : Optional[int] = Features({"""image""": Image()} ) _snake_case : int = Dataset.from_dict(snake_case__ , features=snake_case__ ) _snake_case : Optional[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _snake_case : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features _snake_case : Optional[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=snake_case__ ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str ): """simple docstring""" assert get_writer_batch_size(snake_case__ ) == expected
28
1
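A round-trip sketch of the reader/writer pair exercised above, using a throwaway in-memory dataset and a temp directory rather than the pytest fixtures:

import os
import tempfile

from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [0.1, 0.2]})
path = os.path.join(tempfile.mkdtemp(), "foo.parquet")
assert ParquetDatasetWriter(ds, path).write() > 0  # returns bytes written
reloaded = ParquetDatasetReader(path, cache_dir=tempfile.mkdtemp()).read()
assert reloaded.column_names == ["col_1", "col_2", "col_3"]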
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available A_ = { '''configuration_efficientnet''': [ '''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''EfficientNetConfig''', '''EfficientNetOnnxConfig''', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = ['''EfficientNetImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''EfficientNetForImageClassification''', '''EfficientNetModel''', '''EfficientNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_efficientnet import ( EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientNetConfig, EfficientNetOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientnet import EfficientNetImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientnet import ( EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientNetForImageClassification, EfficientNetModel, EfficientNetPreTrainedModel, ) else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
28
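From the user's side the lazy module above is invisible: the listed symbols import normally from the top-level package, and the heavy submodule is only loaded on first attribute access. A small sketch:

from transformers import EfficientNetConfig  # resolved lazily via _LazyModule

config = EfficientNetConfig()
print(config.model_type)  # efficientnet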
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase: '''simple docstring''' def __init__( self: Dict, a_: Union[str, Any], a_: Tuple=13, a_: Dict=32, a_: Optional[Any]=3, a_: Optional[Any]=4, a_: Optional[int]=[10, 20, 30, 40], a_: Any=[2, 2, 3, 2], a_: Dict=True, a_: Dict=True, a_: List[str]=37, a_: Dict="gelu", a_: List[str]=10, a_: Union[str, Any]=0.02, a_: Any=["stage2", "stage3", "stage4"], a_: Optional[int]=3, a_: Tuple=None, ): '''simple docstring''' _snake_case : Dict = parent _snake_case : Dict = batch_size _snake_case : Optional[Any] = image_size _snake_case : int = num_channels _snake_case : Tuple = num_stages _snake_case : int = hidden_sizes _snake_case : List[str] = depths _snake_case : str = is_training _snake_case : Dict = use_labels _snake_case : List[str] = intermediate_size _snake_case : Optional[int] = hidden_act _snake_case : Any = type_sequence_label_size _snake_case : List[str] = initializer_range _snake_case : Union[str, Any] = out_features _snake_case : Dict = num_labels _snake_case : int = scope _snake_case : Dict = num_stages def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : Optional[int] = None if self.use_labels: _snake_case : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size ) _snake_case : Tuple = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return ConvNextConfig( num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return UperNetConfig( backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=a_, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=a_, loss_ignore_index=255, num_labels=self.num_labels, ) def UpperCamelCase_ ( self: Tuple, a_: List[Any], a_: Dict, a_: Tuple ): '''simple docstring''' _snake_case : List[Any] = UperNetForSemanticSegmentation(config=a_ ) model.to(a_ ) model.eval() _snake_case : Tuple = model(a_ ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Any = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : List[Any] = config_and_inputs _snake_case : Any = 
{"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = (UperNetForSemanticSegmentation,) if is_torch_available() else () lowercase__ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {} lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : List[str] = UperNetModelTester(self ) _snake_case : Dict = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Dict = model_class(a_ ) _snake_case : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : Tuple = [*signature.parameters.keys()] _snake_case : Any = ["""pixel_values"""] self.assertListEqual(arg_names[:1], a_ ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*a_ ) @unittest.skip(reason="""UperNet does not use inputs_embeds""" ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not support input and output embeddings""" ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not have a base model""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not have a base model""" ) def UpperCamelCase_ ( self: int ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' pass def UpperCamelCase_ ( self: str ): '''simple docstring''' def check_hidden_states_output(a_: Dict, a_: List[str], a_: Optional[int] ): _snake_case : Optional[Any] = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): _snake_case : Any = model(**self._prepare_for_class(a_, a_ ) ) _snake_case : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case : List[str] = self.model_tester.num_stages self.assertEqual(len(a_ ), expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size 
// 4], ) _snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : int = True check_hidden_states_output(a_, a_, a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case : Optional[int] = True check_hidden_states_output(a_, a_, a_ ) def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case , _snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Tuple = _config_zero_init(a_ ) _snake_case : Dict = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: _snake_case : Optional[int] = model_class(config=a_ ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="""UperNet does not have tied weights""" ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' pass @slow def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : int = UperNetForSemanticSegmentation.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Union[str, Any] = hf_hub_download( repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" ) _snake_case : List[Any] = Image.open(snake_case__ ).convert("""RGB""" ) return image @require_torch @require_vision @slow class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : str = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" ) _snake_case : Any = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(a_ ) _snake_case : Dict = prepare_img() _snake_case : str = processor(images=a_, return_tensors="""pt""" ).to(a_ ) with torch.no_grad(): _snake_case : Tuple = model(**a_ ) _snake_case : Tuple = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : int = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" ) _snake_case : Optional[int] = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(a_ ) _snake_case : List[str] = prepare_img() _snake_case : Tuple = processor(images=a_, return_tensors="""pt""" ).to(a_ ) with torch.no_grad(): _snake_case : Optional[Any] = model(**a_ ) _snake_case : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : Optional[Any] = torch.tensor( [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
28
1
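# Aside: the integration tests above reduce to the following inference loop.
# This is a sketch, assuming the `transformers` API and the
# "openmmlab/upernet-convnext-tiny" checkpoint referenced in the tests.
import torch
from PIL import Image
from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")

image = Image.open("ADE_val_00000001.jpg").convert("RGB")  # any RGB image works
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.logits.shape)  # (1, num_labels, 512, 512), as asserted in the tests above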
"""simple docstring""" A_ = [ '''Audio''', '''Array2D''', '''Array3D''', '''Array4D''', '''Array5D''', '''ClassLabel''', '''Features''', '''Sequence''', '''Value''', '''Image''', '''Translation''', '''TranslationVariableLanguages''', ] from .audio import Audio from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value from .image import Image from .translation import Translation, TranslationVariableLanguages
28
"""simple docstring""" from __future__ import annotations import string from itertools import cycle, product from pathlib import Path A_ = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) A_ = [ord(letter) for letter in string.ascii_lowercase] A_ = {ord(char) for char in VALID_CHARS} A_ = ["the", "be", "to", "of", "and", "in", "that", "have"] def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : tuple[int, ...] ): """simple docstring""" _snake_case : str = "" _snake_case : int _snake_case : int _snake_case : int for keychar, cipherchar in zip(cycle(snake_case__ ) , snake_case__ ): _snake_case : List[str] = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(snake_case__ ) return decoded def UpperCAmelCase__ (snake_case__ : list[int] ): """simple docstring""" _snake_case : list[str] = [] for key in product(snake_case__ , repeat=3 ): _snake_case : List[Any] = try_key(snake_case__ , snake_case__ ) if encoded is not None: possibles.append(snake_case__ ) return possibles def UpperCAmelCase__ (snake_case__ : list[str] , snake_case__ : str ): """simple docstring""" return [possible for possible in possibles if common_word in possible.lower()] def UpperCAmelCase__ (snake_case__ : str = "p059_cipher.txt" ): """simple docstring""" _snake_case : list[int] _snake_case : list[str] _snake_case : str _snake_case : str _snake_case : str = Path(snake_case__ ).parent.joinpath(snake_case__ ).read_text(encoding="""utf-8""" ) _snake_case : List[Any] = [int(snake_case__ ) for number in data.strip().split(""",""" )] _snake_case : Optional[Any] = filter_valid_chars(snake_case__ ) for common_word in COMMON_WORDS: _snake_case : Union[str, Any] = filter_common_word(snake_case__ , snake_case__ ) if len(snake_case__ ) == 1: break _snake_case : Optional[int] = possibles[0] return sum(ord(snake_case__ ) for char in decoded_text ) if __name__ == "__main__": print(F'''{solution() = }''')
28
1
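# Aside: the cipher solver above works because XOR is self-inverse -- applying
# the same repeating key twice recovers the plaintext. A minimal sketch of that
# property (the key "abc" here is an arbitrary illustration, not the puzzle's key):
from itertools import cycle

plaintext = "an example message"
key = "abc"

ciphertext = [ord(p) ^ ord(k) for p, k in zip(plaintext, cycle(key))]
decoded = "".join(chr(c ^ ord(k)) for c, k in zip(ciphertext, cycle(key)))
assert decoded == plaintext  # XOR with the same key round-trips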
"""simple docstring""" import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str=0.9_99 , snake_case__ : Dict="cosine" , ): """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(snake_case__ : Optional[Any] ): return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(snake_case__ : Optional[int] ): return math.exp(t * -12.0 ) else: raise ValueError(F"Unsupported alpha_tranform_type: {alpha_transform_type}" ) _snake_case : List[Any] = [] for i in range(snake_case__ ): _snake_case : Tuple = i / num_diffusion_timesteps _snake_case : Optional[int] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(snake_case__ ) / alpha_bar_fn(snake_case__ ) , snake_case__ ) ) return torch.tensor(snake_case__ , dtype=torch.floataa ) class lowercase( __a , __a ): '''simple docstring''' lowercase__ = [e.name for e in KarrasDiffusionSchedulers] lowercase__ = 2 @register_to_config def __init__( self: Tuple, a_: int = 1_000, a_: float = 0.00_085, a_: float = 0.012, a_: str = "linear", a_: Optional[Union[np.ndarray, List[float]]] = None, a_: str = "epsilon", a_: str = "linspace", a_: int = 0, ): '''simple docstring''' if trained_betas is not None: _snake_case : Tuple = torch.tensor(a_, dtype=torch.floataa ) elif beta_schedule == "linear": _snake_case : Dict = torch.linspace(a_, a_, a_, dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. _snake_case : List[str] = ( torch.linspace(beta_start**0.5, beta_end**0.5, a_, dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule _snake_case : str = betas_for_alpha_bar(a_ ) else: raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}" ) _snake_case : int = 1.0 - self.betas _snake_case : Optional[Any] = torch.cumprod(self.alphas, dim=0 ) # set all values self.set_timesteps(a_, a_, a_ ) def UpperCamelCase_ ( self: Tuple, a_: List[str], a_: Dict=None ): '''simple docstring''' if schedule_timesteps is None: _snake_case : List[Any] = self.timesteps _snake_case : Any = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: _snake_case : List[Any] = 1 if len(a_ ) > 1 else 0 else: _snake_case : int = timestep.cpu().item() if torch.is_tensor(a_ ) else timestep _snake_case : Tuple = self._index_counter[timestep_int] return indices[pos].item() @property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def UpperCamelCase_ ( self: Optional[Any], a_: torch.FloatTensor, a_: Union[float, torch.FloatTensor], ): '''simple docstring''' _snake_case : Union[str, Any] = self.index_for_timestep(a_ ) if self.state_in_first_order: _snake_case : List[str] = self.sigmas[step_index] else: _snake_case : Any = self.sigmas_interpol[step_index] _snake_case : List[str] = sample / ((sigma**2 + 1) ** 0.5) return sample def UpperCamelCase_ ( self: List[str], a_: int, a_: Union[str, torch.device] = None, a_: Optional[int] = None, ): '''simple docstring''' _snake_case : Union[str, Any] = num_inference_steps _snake_case : Optional[int] = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": _snake_case : Optional[int] = np.linspace(0, num_train_timesteps - 1, a_, dtype=a_ )[::-1].copy() elif self.config.timestep_spacing == "leading": _snake_case : int = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _snake_case : int = (np.arange(0, a_ ) * step_ratio).round()[::-1].copy().astype(a_ ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": _snake_case : Optional[int] = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _snake_case : Tuple = (np.arange(a_, 0, -step_ratio )).round().copy().astype(a_ ) timesteps -= 1 else: raise ValueError( f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
) _snake_case : Dict = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) _snake_case : Dict = torch.from_numpy(np.log(a_ ) ).to(a_ ) _snake_case : int = np.interp(a_, np.arange(0, len(a_ ) ), a_ ) _snake_case : Tuple = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) _snake_case : str = torch.from_numpy(a_ ).to(device=a_ ) # interpolate sigmas _snake_case : int = sigmas.log().lerp(sigmas.roll(1 ).log(), 0.5 ).exp() _snake_case : Optional[int] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) _snake_case : Optional[Any] = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(a_ ).startswith("""mps""" ): # mps does not support float64 _snake_case : Dict = torch.from_numpy(a_ ).to(a_, dtype=torch.floataa ) else: _snake_case : Optional[Any] = torch.from_numpy(a_ ).to(a_ ) # interpolate timesteps _snake_case : Tuple = self.sigma_to_t(a_ ).to(a_, dtype=timesteps.dtype ) _snake_case : List[Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1 ).flatten() _snake_case : List[str] = torch.cat([timesteps[:1], interleaved_timesteps] ) _snake_case : Optional[Any] = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter _snake_case : List[str] = defaultdict(a_ ) def UpperCamelCase_ ( self: Optional[Any], a_: Tuple ): '''simple docstring''' _snake_case : Tuple = sigma.log() # get distribution _snake_case : Any = log_sigma - self.log_sigmas[:, None] # get sigmas range _snake_case : int = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) _snake_case : Dict = low_idx + 1 _snake_case : List[str] = self.log_sigmas[low_idx] _snake_case : int = self.log_sigmas[high_idx] # interpolate sigmas _snake_case : Any = (low - log_sigma) / (low - high) _snake_case : Dict = w.clamp(0, 1 ) # transform interpolation to time range _snake_case : Tuple = (1 - w) * low_idx + w * high_idx _snake_case : List[Any] = t.view(sigma.shape ) return t @property def UpperCamelCase_ ( self: Any ): '''simple docstring''' return self.sample is None def UpperCamelCase_ ( self: Any, a_: Union[torch.FloatTensor, np.ndarray], a_: Union[float, torch.FloatTensor], a_: Union[torch.FloatTensor, np.ndarray], a_: bool = True, ): '''simple docstring''' _snake_case : Any = self.index_for_timestep(a_ ) # advance index counter by 1 _snake_case : List[str] = timestep.cpu().item() if torch.is_tensor(a_ ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: _snake_case : Union[str, Any] = self.sigmas[step_index] _snake_case : Optional[Any] = self.sigmas_interpol[step_index + 1] _snake_case : Union[str, Any] = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method _snake_case : int = self.sigmas[step_index - 1] _snake_case : Any = self.sigmas_interpol[step_index] _snake_case : Tuple = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API _snake_case : Optional[int] = 0 _snake_case : List[str] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": _snake_case : str = sigma_hat if self.state_in_first_order else sigma_interpol _snake_case : Optional[Any] = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": _snake_case : Dict = sigma_hat if self.state_in_first_order else sigma_interpol _snake_case : Optional[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError("""prediction_type not implemented yet: sample""" ) else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order _snake_case : int = (sample - pred_original_sample) / sigma_hat # 3. delta timestep _snake_case : Tuple = sigma_interpol - sigma_hat # store for 2nd order step _snake_case : str = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order _snake_case : Optional[Any] = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep _snake_case : Any = sigma_next - sigma_hat _snake_case : Optional[int] = self.sample _snake_case : Optional[Any] = None _snake_case : Union[str, Any] = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=a_ ) def UpperCamelCase_ ( self: Optional[Any], a_: torch.FloatTensor, a_: torch.FloatTensor, a_: torch.FloatTensor, ): '''simple docstring''' _snake_case : Union[str, Any] = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(a_ ): # mps does not support float64 _snake_case : Dict = self.timesteps.to(original_samples.device, dtype=torch.floataa ) _snake_case : List[Any] = timesteps.to(original_samples.device, dtype=torch.floataa ) else: _snake_case : Optional[Any] = self.timesteps.to(original_samples.device ) _snake_case : Any = timesteps.to(original_samples.device ) _snake_case : List[str] = [self.index_for_timestep(a_, a_ ) for t in timesteps] _snake_case : Tuple = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): _snake_case : List[Any] = sigma.unsqueeze(-1 ) _snake_case : Any = original_samples + noise * sigma return noisy_samples def __len__( self: Union[str, Any] ): '''simple docstring''' return self.config.num_train_timesteps
28
"""simple docstring""" from ...processing_utils import ProcessorMixin class lowercase( __a ): '''simple docstring''' lowercase__ = ["image_processor", "feature_extractor"] lowercase__ = "TvltImageProcessor" lowercase__ = "TvltFeatureExtractor" def __init__( self: Dict, a_: Union[str, Any], a_: Union[str, Any] ): '''simple docstring''' super().__init__(image_processor=a_, feature_extractor=a_ ) _snake_case : Any = image_processor _snake_case : Dict = feature_extractor def __call__( self: int, a_: str=None, a_: Tuple=None, a_: Dict=None, a_: str=None, a_: Optional[int]=False, a_: Tuple=False, *a_: List[str], **a_: int, ): '''simple docstring''' if images is None and audio is None: raise ValueError("""You need to specify either an `images` or `audio` input to process.""" ) _snake_case : Optional[int] = None if images is not None: _snake_case : Tuple = self.image_processor(a_, mask_pixel=a_, *a_, **a_ ) if images_mixed is not None: _snake_case : Optional[int] = self.image_processor(a_, is_mixed=a_, *a_, **a_ ) if audio is not None: _snake_case : Any = self.feature_extractor( a_, *a_, sampling_rate=a_, mask_audio=a_, **a_ ) _snake_case : List[str] = {} if audio is not None: output_dict.update(a_ ) if images is not None: output_dict.update(a_ ) if images_mixed_dict is not None: output_dict.update(a_ ) return output_dict @property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Dict = self.image_processor.model_input_names _snake_case : List[str] = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
28
1
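# Aside: the `betas_for_alpha_bar` helper at the top of the scheduler file above
# computes the "squaredcos_cap_v2" (cosine) noise schedule. A standalone sketch
# of the same computation in pure Python:
import math


def cosine_betas(num_diffusion_timesteps: int, max_beta: float = 0.999) -> list[float]:
    def alpha_bar(t: float) -> float:
        # cumulative signal level under the cosine schedule
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        # per-step noise: beta_i = 1 - alpha_bar(t2) / alpha_bar(t1), capped at max_beta
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return betas


print(cosine_betas(1000)[:3])  # betas start near zero and grow along the schedule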
"""simple docstring""" import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) A_ = [ '''cross_validation.py''', '''gradient_accumulation.py''', '''local_sgd.py''', '''multi_process_metrics.py''', '''memory.py''', '''automatic_gradient_accumulation.py''', '''fsdp_with_peak_mem_tracking.py''', '''deepspeed_with_config_support.py''', '''megatron_lm_gpt_pretraining.py''', ] class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: List[Any], a_: str, a_: bool, a_: str = None, a_: list = None ): '''simple docstring''' _snake_case : List[Any] = None _snake_case : Union[str, Any] = os.path.abspath(os.path.join("""examples""", """by_feature""" ) ) _snake_case : List[Any] = os.path.abspath("""examples""" ) for item in os.listdir(a_ ): if item not in EXCLUDE_EXAMPLES: _snake_case : List[Any] = os.path.join(a_, a_ ) if os.path.isfile(a_ ) and ".py" in item_path: with self.subTest( tested_script=a_, feature_script=a_, tested_section="""main()""" if parser_only else """training_function()""", ): _snake_case : Dict = compare_against_test( os.path.join(a_, a_ ), a_, a_, a_ ) _snake_case : Any = """\n""".join(a_ ) if special_strings is not None: for string in special_strings: _snake_case : int = diff.replace(a_, """""" ) self.assertEqual(a_, """""" ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' self.one_complete_example("""complete_nlp_example.py""", a_ ) self.one_complete_example("""complete_nlp_example.py""", a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : List[str] = os.path.abspath(os.path.join("""examples""", """cv_example.py""" ) ) _snake_case : Optional[Any] = [ """ """ * 16 + """{\n\n""", """ """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""", """ """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""", """ """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""", """ """ * 20 + """\"epoch\": epoch,\n\n""", """ """ * 16 + """},\n\n""", """ """ * 16 + """step=epoch,\n""", """ """ * 12, """ """ * 8 + """for step, batch in enumerate(active_dataloader):\n""", ] self.one_complete_example("""complete_cv_example.py""", a_, a_, a_ ) self.one_complete_example("""complete_cv_example.py""", a_, a_, a_ ) @mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} ) class lowercase( __a ): '''simple docstring''' lowercase__ = False @classmethod def UpperCamelCase_ ( cls: Any ): '''simple docstring''' super().setUpClass() _snake_case : int = tempfile.mkdtemp() _snake_case : Any = os.path.join(cls._tmpdir, """default_config.yml""" ) write_basic_config(save_location=cls.configPath ) _snake_case : Any = ["""accelerate""", """launch""", """--config_file""", cls.configPath] @classmethod def UpperCamelCase_ ( cls: Dict ): '''simple docstring''' super().tearDownClass() shutil.rmtree(cls._tmpdir ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Optional[Any] = f"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split() run_command(self._launch_args + testargs ) 
self.assertTrue(os.path.exists(os.path.join(self.tmpdir, """epoch_0""" ) ) ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[Any] = f"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split() _snake_case : List[str] = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir, """step_2""" ) ) ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : str = f"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, 'epoch_0' )}\n ".split() _snake_case : Optional[Any] = run_command(self._launch_args + testargs, return_stdout=a_ ) self.assertNotIn("""epoch 0:""", a_ ) self.assertIn("""epoch 1:""", a_ ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : List[Any] = f"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, 'step_2' )}\n ".split() _snake_case : Dict = run_command(self._launch_args + testargs, return_stdout=a_ ) if torch.cuda.is_available(): _snake_case : Dict = torch.cuda.device_count() else: _snake_case : Optional[Any] = 1 if num_processes > 1: self.assertNotIn("""epoch 0:""", a_ ) self.assertIn("""epoch 1:""", a_ ) else: self.assertIn("""epoch 0:""", a_ ) self.assertIn("""epoch 1:""", a_ ) @slow def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Union[str, Any] = """ examples/by_feature/cross_validation.py --num_folds 2 """.split() with mock.patch.dict(os.environ, {"""TESTING_MOCKED_DATALOADERS""": """0"""} ): _snake_case : Optional[int] = run_command(self._launch_args + testargs, return_stdout=a_ ) _snake_case : List[Any] = re.findall("""({.+})""", a_ ) _snake_case : List[Any] = [r for r in results if """accuracy""" in r][-1] _snake_case : List[Any] = ast.literal_eval(a_ ) self.assertGreaterEqual(results["""accuracy"""], 0.75 ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : List[Any] = ["""examples/by_feature/multi_process_metrics.py"""] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ, {"""WANDB_MODE""": """offline"""} ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: _snake_case : Dict = f"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(a_, """tracking""" ) ) ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Optional[Any] = ["""examples/by_feature/gradient_accumulation.py"""] run_command(self._launch_args + testargs ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Any = ["""examples/by_feature/local_sgd.py"""] run_command(self._launch_args + testargs )
28
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): A_ = '''pt''' elif is_tf_available(): A_ = '''tf''' else: A_ = '''jax''' class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ByTaTokenizer lowercase__ = False def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' super().setUp() _snake_case : List[str] = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' return ByTaTokenizer.from_pretrained("""google/byt5-small""" ) def UpperCamelCase_ ( self: List[Any], **a_: int ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname, **a_ ) def UpperCamelCase_ ( self: Optional[Any], a_: Optional[Any], a_: List[Any]=False, a_: int=20, a_: Union[str, Any]=5 ): '''simple docstring''' _snake_case : List[Any] = [] for i in range(len(a_ ) ): try: _snake_case : Optional[Any] = tokenizer.decode([i], clean_up_tokenization_spaces=a_ ) except UnicodeDecodeError: pass toks.append((i, tok) ) _snake_case : str = list(filter(lambda a_ : re.match(r"""^[ a-zA-Z]+$""", t[1] ), a_ ) ) _snake_case : List[Any] = list(filter(lambda a_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=a_ ), a_ ) ) if max_length is not None and len(a_ ) > max_length: _snake_case : Tuple = toks[:max_length] if min_length is not None and len(a_ ) < min_length and len(a_ ) > 0: while len(a_ ) < min_length: _snake_case : List[str] = toks + toks # toks_str = [t[1] for t in toks] _snake_case : Tuple = [t[0] for t in toks] # Ensure consistency _snake_case : Optional[Any] = tokenizer.decode(a_, clean_up_tokenization_spaces=a_ ) if " " not in output_txt and len(a_ ) > 1: _snake_case : Dict = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=a_ ) + """ """ + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=a_ ) ) if with_prefix_space: _snake_case : Union[str, Any] = """ """ + output_txt _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ ) return output_txt, output_ids def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[int] = self.ta_base_tokenizer _snake_case : Optional[Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] ) _snake_case : int = tokenizer(["""hi""", """I went to the gym""", """"""] ) self.assertListEqual(batch_with_eos_added["""input_ids"""], batch_without_eos_added["""input_ids"""] ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[str] = self.ta_base_tokenizer _snake_case : Tuple = """Unicode €.""" _snake_case : List[Any] = tokenizer(a_ ) _snake_case : Tuple = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded["""input_ids"""], a_ ) # decoding _snake_case : Tuple = tokenizer.decode(a_ ) self.assertEqual(a_, """Unicode €.</s>""" ) _snake_case : Tuple = tokenizer("""e è é ê ë""" ) _snake_case : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded["""input_ids"""], a_ ) # decoding _snake_case : int = tokenizer.decode(a_ ) self.assertEqual(a_, """e è é ê ë</s>""" ) # encode/decode, but with `encode` instead of `__call__` 
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ), """e è é ê ë</s>""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Dict = self.ta_base_tokenizer _snake_case : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] # fmt: off _snake_case : Union[str, Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on _snake_case : int = tokenizer(a_, padding=a_, return_tensors=a_ ) self.assertIsInstance(a_, a_ ) if FRAMEWORK != "jax": _snake_case : List[str] = list(batch.input_ids.numpy()[0] ) else: _snake_case : Optional[int] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(a_, a_ ) self.assertEqual((2, 37), batch.input_ids.shape ) self.assertEqual((2, 37), batch.attention_mask.shape ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : List[Any] = self.ta_base_tokenizer _snake_case : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] _snake_case : Tuple = tokenizer(a_, padding=a_, return_tensors=a_ ) # check if input_ids are returned and no decoder_input_ids self.assertIn("""input_ids""", a_ ) self.assertIn("""attention_mask""", a_ ) self.assertNotIn("""decoder_input_ids""", a_ ) self.assertNotIn("""decoder_attention_mask""", a_ ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Union[str, Any] = self.ta_base_tokenizer _snake_case : Dict = [ """Summary of the text.""", """Another summary.""", ] _snake_case : Optional[int] = tokenizer( text_target=a_, max_length=32, padding="""max_length""", truncation=a_, return_tensors=a_ ) self.assertEqual(32, targets["""input_ids"""].shape[1] ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : int = self.ta_base_tokenizer _snake_case : Optional[int] = ["""A long paragraph for summarization. </s>"""] _snake_case : Dict = ["""Summary of the text. 
</s>"""] # fmt: off _snake_case : Optional[int] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] _snake_case : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on _snake_case : Optional[Any] = tokenizer(a_, text_target=a_ ) self.assertEqual(a_, batch["""input_ids"""][0] ) self.assertEqual(a_, batch["""labels"""][0] ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): self.assertNotEqual(tokenizer.model_max_length, 42 ) # Now let's start the test _snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc _snake_case : List[str] = tempfile.mkdtemp() _snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running""" _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ ) tokenizer.save_pretrained(a_ ) _snake_case : List[Any] = tokenizer.__class__.from_pretrained(a_ ) _snake_case : Dict = after_tokenizer.encode(a_, add_special_tokens=a_ ) self.assertListEqual(a_, a_ ) shutil.rmtree(a_ ) _snake_case : Tuple = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc _snake_case : Union[str, Any] = tempfile.mkdtemp() _snake_case : List[Any] = """ He is very happy, UNwant\u00E9d,running""" tokenizer.add_tokens(["""bim""", """bambam"""] ) _snake_case : Optional[Any] = tokenizer.additional_special_tokens additional_special_tokens.append("""new_additional_special_token""" ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ ) tokenizer.save_pretrained(a_ ) _snake_case : Optional[Any] = tokenizer.__class__.from_pretrained(a_ ) _snake_case : str = after_tokenizer.encode(a_, add_special_tokens=a_ ) self.assertListEqual(a_, a_ ) self.assertIn("""new_additional_special_token""", after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length, 42 ) _snake_case : Optional[int] = tokenizer.__class__.from_pretrained(a_, model_max_length=43 ) self.assertEqual(tokenizer.model_max_length, 43 ) shutil.rmtree(a_ ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Optional[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(a_ ) with open(os.path.join(a_, """special_tokens_map.json""" ), encoding="""utf-8""" ) as json_file: _snake_case : Union[str, Any] = json.load(a_ ) with open(os.path.join(a_, """tokenizer_config.json""" ), encoding="""utf-8""" ) as json_file: _snake_case : List[Any] = json.load(a_ ) _snake_case : int = [f"<extra_id_{i}>" for i in range(125 )] _snake_case : Optional[int] = added_tokens_extra_ids + [ """an_additional_special_token""" ] _snake_case : Dict = added_tokens_extra_ids + [ 
"""an_additional_special_token""" ] with open(os.path.join(a_, """special_tokens_map.json""" ), """w""", encoding="""utf-8""" ) as outfile: json.dump(a_, a_ ) with open(os.path.join(a_, """tokenizer_config.json""" ), """w""", encoding="""utf-8""" ) as outfile: json.dump(a_, a_ ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _snake_case : Optional[int] = tokenizer_class.from_pretrained( a_, ) self.assertIn( """an_additional_special_token""", tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ["""an_additional_special_token"""], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _snake_case : Union[str, Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""", lstrip=a_ )] _snake_case : List[Any] = tokenizer_class.from_pretrained( a_, additional_special_tokens=a_, ) self.assertIn("""a_new_additional_special_token""", tokenizer.additional_special_tokens ) self.assertEqual( ["""a_new_additional_special_token"""], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ), ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : List[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(a_ ) _snake_case : Optional[Any] = tokenizer_class.from_pretrained(a_ ) self.assertTrue(tokenizer.decode([255] ) == """""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' pass def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[Any] = self.get_tokenizers(fast=a_, do_lower_case=a_ ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): _snake_case : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""] _snake_case : List[Any] = tokenizer.convert_tokens_to_string(a_ ) self.assertIsInstance(a_, a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): _snake_case : Optional[int] = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] _snake_case : Any = 0 _snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens( a_, skip_special_tokens=a_ ) for attr in attributes_list: setattr(a_, attr + """_id""", a_ ) self.assertEqual(getattr(a_, a_ ), a_ ) self.assertEqual(getattr(a_, attr + """_id""" ), a_ ) setattr(a_, attr + """_id""", a_ ) 
self.assertEqual(getattr(a_, a_ ), a_ ) self.assertEqual(getattr(a_, attr + """_id""" ), a_ ) setattr(a_, """additional_special_tokens_ids""", [] ) self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [] ) self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [] ) setattr(a_, """additional_special_tokens_ids""", [token_id_to_test_setters] ) self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [token_to_test_setters] ) self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [token_id_to_test_setters] )
28
1
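# Aside: the expected ids in the tokenizer tests above follow ByT5's byte-level
# scheme -- each UTF-8 byte maps to `byte + 3` (ids 0..2 are reserved for
# pad/</s>/<unk>) and </s> is id 1. A sketch reproducing the "Unicode €." vector:
def byt5_encode(text: str) -> list[int]:
    return [b + 3 for b in text.encode("utf-8")] + [1]  # shift bytes, append </s>


assert byt5_encode("Unicode €.") == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]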
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''', '''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''', } class lowercase( __a ): '''simple docstring''' lowercase__ = "luke" def __init__( self: Dict, a_: str=50_267, a_: Tuple=500_000, a_: Tuple=768, a_: Optional[Any]=256, a_: Optional[Any]=12, a_: Tuple=12, a_: Union[str, Any]=3_072, a_: Any="gelu", a_: Optional[int]=0.1, a_: Optional[int]=0.1, a_: Tuple=512, a_: Optional[Any]=2, a_: Union[str, Any]=0.02, a_: Union[str, Any]=1E-12, a_: Any=True, a_: Any=None, a_: Tuple=1, a_: Optional[int]=0, a_: Union[str, Any]=2, **a_: List[Any], ): '''simple docstring''' super().__init__(pad_token_id=a_, bos_token_id=a_, eos_token_id=a_, **a_ ) _snake_case : Optional[int] = vocab_size _snake_case : Any = entity_vocab_size _snake_case : Optional[Any] = hidden_size _snake_case : Union[str, Any] = entity_emb_size _snake_case : str = num_hidden_layers _snake_case : Union[str, Any] = num_attention_heads _snake_case : Any = hidden_act _snake_case : Union[str, Any] = intermediate_size _snake_case : str = hidden_dropout_prob _snake_case : Any = attention_probs_dropout_prob _snake_case : int = max_position_embeddings _snake_case : List[Any] = type_vocab_size _snake_case : List[str] = initializer_range _snake_case : Optional[Any] = layer_norm_eps _snake_case : Dict = use_entity_aware_attention _snake_case : List[Any] = classifier_dropout
28
"""simple docstring""" from abc import ABC, abstractmethod from argparse import ArgumentParser class lowercase( __a ): '''simple docstring''' @staticmethod @abstractmethod def UpperCamelCase_ ( a_: ArgumentParser ): '''simple docstring''' raise NotImplementedError() @abstractmethod def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' raise NotImplementedError()
28
1
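# Aside: the abstract class above (names obfuscated) matches the usual
# transformers CLI-command pattern: a static method registers the subcommand's
# arguments, an instance method runs it. A hedged standalone sketch with a
# hypothetical "hello" subcommand:
import argparse
from abc import ABC, abstractmethod


class BaseCommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(subparsers) -> None:
        ...

    @abstractmethod
    def run(self) -> None:
        ...


class HelloCommand(BaseCommand):
    @staticmethod
    def register_subcommand(subparsers) -> None:
        sub = subparsers.add_parser("hello")
        sub.add_argument("--name", default="world")
        sub.set_defaults(factory=lambda args: HelloCommand(args.name))

    def __init__(self, name: str):
        self.name = name

    def run(self) -> None:
        print(f"hello {self.name}")


parser = argparse.ArgumentParser(prog="cli")
HelloCommand.register_subcommand(parser.add_subparsers())
args = parser.parse_args(["hello", "--name", "there"])
args.factory(args).run()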
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowercase( metaclass=__a ): '''simple docstring''' lowercase__ = ["flax"] def __init__( self: List[str], *a_: Optional[Any], **a_: Optional[int] ): '''simple docstring''' requires_backends(self, ["""flax"""] ) @classmethod def UpperCamelCase_ ( cls: List[Any], *a_: List[str], **a_: Optional[int] ): '''simple docstring''' requires_backends(cls, ["""flax"""] ) @classmethod def UpperCamelCase_ ( cls: Optional[Any], *a_: Tuple, **a_: Union[str, Any] ): '''simple docstring''' requires_backends(cls, ["""flax"""] ) class lowercase( metaclass=__a ): '''simple docstring''' lowercase__ = ["flax"] def __init__( self: Any, *a_: Union[str, Any], **a_: Optional[Any] ): '''simple docstring''' requires_backends(self, ["""flax"""] ) @classmethod def UpperCamelCase_ ( cls: Optional[Any], *a_: Optional[int], **a_: List[Any] ): '''simple docstring''' requires_backends(cls, ["""flax"""] ) @classmethod def UpperCamelCase_ ( cls: List[str], *a_: List[str], **a_: int ): '''simple docstring''' requires_backends(cls, ["""flax"""] ) class lowercase( metaclass=__a ): '''simple docstring''' lowercase__ = ["flax"] def __init__( self: List[Any], *a_: Optional[int], **a_: List[Any] ): '''simple docstring''' requires_backends(self, ["""flax"""] ) @classmethod def UpperCamelCase_ ( cls: Tuple, *a_: Optional[Any], **a_: Optional[Any] ): '''simple docstring''' requires_backends(cls, ["""flax"""] ) @classmethod def UpperCamelCase_ ( cls: Optional[Any], *a_: Optional[int], **a_: Optional[int] ): '''simple docstring''' requires_backends(cls, ["""flax"""] ) class lowercase( metaclass=__a ): '''simple docstring''' lowercase__ = ["flax"] def __init__( self: Any, *a_: Any, **a_: Union[str, Any] ): '''simple docstring''' requires_backends(self, ["""flax"""] ) @classmethod def UpperCamelCase_ ( cls: Dict, *a_: str, **a_: Tuple ): '''simple docstring''' requires_backends(cls, ["""flax"""] ) @classmethod def UpperCamelCase_ ( cls: Any, *a_: Dict, **a_: Tuple ): '''simple docstring''' requires_backends(cls, ["""flax"""] ) class lowercase( metaclass=__a ): '''simple docstring''' lowercase__ = ["flax"] def __init__( self: Optional[int], *a_: Union[str, Any], **a_: int ): '''simple docstring''' requires_backends(self, ["""flax"""] ) @classmethod def UpperCamelCase_ ( cls: Union[str, Any], *a_: List[str], **a_: Optional[int] ): '''simple docstring''' requires_backends(cls, ["""flax"""] ) @classmethod def UpperCamelCase_ ( cls: List[str], *a_: str, **a_: int ): '''simple docstring''' requires_backends(cls, ["""flax"""] ) class lowercase( metaclass=__a ): '''simple docstring''' lowercase__ = ["flax"] def __init__( self: int, *a_: str, **a_: List[Any] ): '''simple docstring''' requires_backends(self, ["""flax"""] ) @classmethod def UpperCamelCase_ ( cls: str, *a_: int, **a_: Union[str, Any] ): '''simple docstring''' requires_backends(cls, ["""flax"""] ) @classmethod def UpperCamelCase_ ( cls: Optional[int], *a_: int, **a_: Optional[int] ): '''simple docstring''' requires_backends(cls, ["""flax"""] ) class lowercase( metaclass=__a ): '''simple docstring''' lowercase__ = ["flax"] def __init__( self: Optional[Any], *a_: List[str], **a_: Dict ): '''simple docstring''' requires_backends(self, ["""flax"""] ) @classmethod def UpperCamelCase_ ( cls: List[str], *a_: List[str], **a_: Tuple ): '''simple docstring''' requires_backends(cls, ["""flax"""] ) @classmethod def UpperCamelCase_ ( cls: Any, *a_: Dict, **a_: List[str] ): '''simple docstring''' requires_backends(cls, 
["""flax"""] ) class lowercase( metaclass=__a ): '''simple docstring''' lowercase__ = ["flax"] def __init__( self: List[Any], *a_: Any, **a_: Dict ): '''simple docstring''' requires_backends(self, ["""flax"""] ) @classmethod def UpperCamelCase_ ( cls: str, *a_: List[Any], **a_: Optional[Any] ): '''simple docstring''' requires_backends(cls, ["""flax"""] ) @classmethod def UpperCamelCase_ ( cls: str, *a_: int, **a_: str ): '''simple docstring''' requires_backends(cls, ["""flax"""] ) class lowercase( metaclass=__a ): '''simple docstring''' lowercase__ = ["flax"] def __init__( self: str, *a_: Optional[Any], **a_: List[Any] ): '''simple docstring''' requires_backends(self, ["""flax"""] ) @classmethod def UpperCamelCase_ ( cls: Dict, *a_: Dict, **a_: Tuple ): '''simple docstring''' requires_backends(cls, ["""flax"""] ) @classmethod def UpperCamelCase_ ( cls: Optional[int], *a_: Tuple, **a_: Dict ): '''simple docstring''' requires_backends(cls, ["""flax"""] ) class lowercase( metaclass=__a ): '''simple docstring''' lowercase__ = ["flax"] def __init__( self: List[str], *a_: Union[str, Any], **a_: Any ): '''simple docstring''' requires_backends(self, ["""flax"""] ) @classmethod def UpperCamelCase_ ( cls: List[str], *a_: Dict, **a_: str ): '''simple docstring''' requires_backends(cls, ["""flax"""] ) @classmethod def UpperCamelCase_ ( cls: int, *a_: Dict, **a_: str ): '''simple docstring''' requires_backends(cls, ["""flax"""] ) class lowercase( metaclass=__a ): '''simple docstring''' lowercase__ = ["flax"] def __init__( self: Optional[Any], *a_: str, **a_: str ): '''simple docstring''' requires_backends(self, ["""flax"""] ) @classmethod def UpperCamelCase_ ( cls: Optional[Any], *a_: Tuple, **a_: Union[str, Any] ): '''simple docstring''' requires_backends(cls, ["""flax"""] ) @classmethod def UpperCamelCase_ ( cls: Dict, *a_: Union[str, Any], **a_: List[str] ): '''simple docstring''' requires_backends(cls, ["""flax"""] ) class lowercase( metaclass=__a ): '''simple docstring''' lowercase__ = ["flax"] def __init__( self: Optional[int], *a_: Dict, **a_: List[Any] ): '''simple docstring''' requires_backends(self, ["""flax"""] ) @classmethod def UpperCamelCase_ ( cls: List[Any], *a_: List[Any], **a_: Union[str, Any] ): '''simple docstring''' requires_backends(cls, ["""flax"""] ) @classmethod def UpperCamelCase_ ( cls: Any, *a_: Tuple, **a_: Dict ): '''simple docstring''' requires_backends(cls, ["""flax"""] ) class lowercase( metaclass=__a ): '''simple docstring''' lowercase__ = ["flax"] def __init__( self: int, *a_: Optional[Any], **a_: int ): '''simple docstring''' requires_backends(self, ["""flax"""] ) @classmethod def UpperCamelCase_ ( cls: Union[str, Any], *a_: Tuple, **a_: List[Any] ): '''simple docstring''' requires_backends(cls, ["""flax"""] ) @classmethod def UpperCamelCase_ ( cls: str, *a_: int, **a_: List[Any] ): '''simple docstring''' requires_backends(cls, ["""flax"""] )
28
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''', '''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''', '''junnyu/roformer_chinese_char_small''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json''' ), '''junnyu/roformer_chinese_char_base''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json''' ), '''junnyu/roformer_small_discriminator''': ( '''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json''' ), '''junnyu/roformer_small_generator''': ( '''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json''' ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class lowercase( __a ): '''simple docstring''' lowercase__ = "roformer" def __init__( self: List[str], a_: Tuple=50_000, a_: Optional[Any]=None, a_: List[str]=768, a_: Union[str, Any]=12, a_: Optional[int]=12, a_: Optional[Any]=3_072, a_: List[str]="gelu", a_: List[str]=0.1, a_: Tuple=0.1, a_: Optional[int]=1_536, a_: Any=2, a_: Optional[int]=0.02, a_: Tuple=1E-12, a_: Dict=0, a_: str=False, a_: Dict=True, **a_: Dict, ): '''simple docstring''' super().__init__(pad_token_id=a_, **a_ ) _snake_case : int = vocab_size _snake_case : int = hidden_size if embedding_size is None else embedding_size _snake_case : Dict = hidden_size _snake_case : Optional[int] = num_hidden_layers _snake_case : Any = num_attention_heads _snake_case : Dict = hidden_act _snake_case : Optional[int] = intermediate_size _snake_case : List[Any] = hidden_dropout_prob _snake_case : Union[str, Any] = attention_probs_dropout_prob _snake_case : Any = max_position_embeddings _snake_case : Tuple = type_vocab_size _snake_case : List[Any] = initializer_range _snake_case : List[Any] = layer_norm_eps _snake_case : Optional[Any] = rotary_value _snake_case : List[str] = use_cache class lowercase( __a ): '''simple docstring''' @property def UpperCamelCase_ ( self: Dict ): '''simple docstring''' if self.task == "multiple-choice": _snake_case : str = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _snake_case : List[str] = {0: """batch""", 1: """sequence"""} _snake_case : List[Any] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
28
1
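# Aside: every dummy class above follows one template -- each entry point calls
# `requires_backends`, which raises unless the named backend (flax) is
# importable. A simplified sketch of the guard; the real helper lives in the
# package's utils module and uses its own availability registry:
import importlib.util


def requires_backends(name: str, backends: list[str]) -> None:
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the missing backend(s): {', '.join(missing)}")


class FlaxOnlyModel:
    """Hypothetical stand-in for one of the dummy classes above."""

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(type(self).__name__, self._backends)

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls.__name__, cls._backends)


try:
    FlaxOnlyModel()  # raises ImportError when `flax` is not installed
except ImportError as err:
    print(err)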
"""simple docstring""" import os from collections import deque import torch from torch.utils.data import Dataset class lowercase( __a ): '''simple docstring''' def __init__( self: Optional[int], a_: Union[str, Any]="", a_: Optional[Any]="train" ): '''simple docstring''' assert os.path.isdir(a_ ) _snake_case : str = [] _snake_case : Tuple = os.listdir(a_ ) for story_filename in story_filenames_list: if "summary" in story_filename: continue _snake_case : Optional[int] = os.path.join(a_, a_ ) if not os.path.isfile(a_ ): continue self.documents.append(a_ ) def __len__( self: Tuple ): '''simple docstring''' return len(self.documents ) def __getitem__( self: Optional[int], a_: Optional[int] ): '''simple docstring''' _snake_case : str = self.documents[idx] _snake_case : str = document_path.split("""/""" )[-1] with open(a_, encoding="""utf-8""" ) as source: _snake_case : Dict = source.read() _snake_case , _snake_case : Optional[Any] = process_story(a_ ) return document_name, story_lines, summary_lines def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" _snake_case : int = list(filter(lambda snake_case__ : len(snake_case__ ) != 0 , [line.strip() for line in raw_story.split("""\n""" )] ) ) # for some unknown reason some lines miss a period, add it _snake_case : Tuple = [_add_missing_period(snake_case__ ) for line in nonempty_lines] # gather article lines _snake_case : str = [] _snake_case : List[str] = deque(snake_case__ ) while True: try: _snake_case : Any = lines.popleft() if element.startswith("""@highlight""" ): break story_lines.append(snake_case__ ) except IndexError: # if "@highlight" is absent from the file we pop # all elements until there is None, raising an exception. return story_lines, [] # gather summary lines _snake_case : Tuple = list(filter(lambda snake_case__ : not t.startswith("""@highlight""" ) , snake_case__ ) ) return story_lines, summary_lines def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" _snake_case : Dict = [""".""", """!""", """?""", """...""", """'""", """`""", """\"""", """\u2019""", """\u2019""", """)"""] if line.startswith("""@highlight""" ): return line if line[-1] in END_TOKENS: return line return line + "." 
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : List[Any] , snake_case__ : Any ): """simple docstring""" if len(snake_case__ ) > block_size: return sequence[:block_size] else: sequence.extend([pad_token_id] * (block_size - len(snake_case__ )) ) return sequence def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Any ): """simple docstring""" _snake_case : Union[str, Any] = torch.ones_like(snake_case__ ) _snake_case : List[str] = sequence == pad_token_id _snake_case : Union[str, Any] = 0 return mask def UpperCAmelCase__ (snake_case__ : int , snake_case__ : Tuple , snake_case__ : Optional[int] ): """simple docstring""" _snake_case : List[str] = [tokenizer.encode(snake_case__ ) for line in story_lines] _snake_case : List[Any] = [token for sentence in story_lines_token_ids for token in sentence] _snake_case : Optional[int] = [tokenizer.encode(snake_case__ ) for line in summary_lines] _snake_case : List[Any] = [token for sentence in summary_lines_token_ids for token in sentence] return story_token_ids, summary_token_ids def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[Any] ): """simple docstring""" _snake_case : Optional[Any] = [] for sequence in batch: _snake_case : Union[str, Any] = -1 _snake_case : List[Any] = [] for s in sequence: if s == separator_token_id: sentence_num += 1 embeddings.append(sentence_num % 2 ) batch_embeddings.append(snake_case__ ) return torch.tensor(snake_case__ )
28
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Union[str, Any]=1 ): """simple docstring""" if n_shave_prefix_segments >= 0: return ".".join(path.split(""".""" )[n_shave_prefix_segments:] ) else: return ".".join(path.split(""".""" )[:n_shave_prefix_segments] ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[Any]=0 ): """simple docstring""" _snake_case : Optional[Any] = [] for old_item in old_list: _snake_case : Union[str, Any] = old_item.replace("""in_layers.0""" , """norm1""" ) _snake_case : List[Any] = new_item.replace("""in_layers.2""" , """conv1""" ) _snake_case : Tuple = new_item.replace("""out_layers.0""" , """norm2""" ) _snake_case : Dict = new_item.replace("""out_layers.3""" , """conv2""" ) _snake_case : int = new_item.replace("""emb_layers.1""" , """time_emb_proj""" ) _snake_case : Optional[Any] = new_item.replace("""skip_connection""" , """conv_shortcut""" ) _snake_case : str = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Dict=0 ): """simple docstring""" _snake_case : Dict = [] for old_item in old_list: _snake_case : Dict = old_item _snake_case : int = new_item.replace("""norm.weight""" , """group_norm.weight""" ) _snake_case : str = new_item.replace("""norm.bias""" , """group_norm.bias""" ) _snake_case : Optional[Any] = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" ) _snake_case : Optional[Any] = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" ) _snake_case : Optional[Any] = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : str=None , snake_case__ : str=None , snake_case__ : List[str]=None ): """simple docstring""" assert isinstance(snake_case__ , snake_case__ ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _snake_case : Union[str, Any] = old_checkpoint[path] _snake_case : Optional[int] = old_tensor.shape[0] // 3 _snake_case : List[Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) _snake_case : Union[str, Any] = old_tensor.shape[0] // config["""num_head_channels"""] // 3 _snake_case : Any = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _snake_case , _snake_case , _snake_case : List[str] = old_tensor.split(channels // num_heads , dim=1 ) _snake_case : Union[str, Any] = query.reshape(snake_case__ ) _snake_case : Tuple = key.reshape(snake_case__ ) _snake_case : Any = value.reshape(snake_case__ ) for path in paths: _snake_case : List[Any] = path["""new"""] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _snake_case : Union[str, Any] = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" ) _snake_case : str = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" ) _snake_case : Any = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" ) if additional_replacements is not None: for replacement in additional_replacements: _snake_case : int = new_path.replace(replacement["""old"""] , replacement["""new"""] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _snake_case : Dict = old_checkpoint[path["""old"""]][:, :, 0] else: _snake_case : Optional[Any] = old_checkpoint[path["""old"""]] def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : List[str] ): """simple docstring""" _snake_case : int = {} _snake_case : Tuple = checkpoint["""time_embed.0.weight"""] _snake_case : List[str] = checkpoint["""time_embed.0.bias"""] _snake_case : List[str] = checkpoint["""time_embed.2.weight"""] _snake_case : Tuple = checkpoint["""time_embed.2.bias"""] _snake_case : Dict = checkpoint["""input_blocks.0.0.weight"""] _snake_case : List[Any] = checkpoint["""input_blocks.0.0.bias"""] _snake_case : List[Any] = checkpoint["""out.0.weight"""] _snake_case : Any = checkpoint["""out.0.bias"""] _snake_case : Any = checkpoint["""out.2.weight"""] _snake_case : List[str] = checkpoint["""out.2.bias"""] # Retrieves the keys for the input blocks only _snake_case : List[str] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} ) _snake_case : Any = { layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key] for layer_id in range(snake_case__ ) } # Retrieves the keys for the middle blocks only _snake_case : Optional[int] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} ) _snake_case : Optional[int] = { layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key] for layer_id in range(snake_case__ ) } # Retrieves the keys for the output blocks only _snake_case : str = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} ) _snake_case : List[Any] = { layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key] for layer_id in range(snake_case__ ) } for i in range(1 , snake_case__ ): _snake_case : Union[str, Any] = (i - 1) // (config["""num_res_blocks"""] + 1) _snake_case : int = (i - 1) % (config["""num_res_blocks"""] + 1) _snake_case : List[str] = [key for key in input_blocks[i] if 
F"input_blocks.{i}.0" in key] _snake_case : str = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key] if F"input_blocks.{i}.0.op.weight" in checkpoint: _snake_case : Union[str, Any] = checkpoint[ F"input_blocks.{i}.0.op.weight" ] _snake_case : Dict = checkpoint[ F"input_blocks.{i}.0.op.bias" ] continue _snake_case : Optional[int] = renew_resnet_paths(snake_case__ ) _snake_case : int = {"""old""": F"input_blocks.{i}.0", """new""": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"} _snake_case : Tuple = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""} assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path, resnet_op] , config=snake_case__ ) if len(snake_case__ ): _snake_case : str = renew_attention_paths(snake_case__ ) _snake_case : List[str] = { """old""": F"input_blocks.{i}.1", """new""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}", } _snake_case : Optional[int] = { F"input_blocks.{i}.1.qkv.bias": { """key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", """query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", """value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, F"input_blocks.{i}.1.qkv.weight": { """key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", """query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", """value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case__ , config=snake_case__ , ) _snake_case : int = middle_blocks[0] _snake_case : List[str] = middle_blocks[1] _snake_case : Any = middle_blocks[2] _snake_case : Dict = renew_resnet_paths(snake_case__ ) assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ ) _snake_case : Any = renew_resnet_paths(snake_case__ ) assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ ) _snake_case : Dict = renew_attention_paths(snake_case__ ) _snake_case : Tuple = { """middle_block.1.qkv.bias""": { """key""": """mid_block.attentions.0.key.bias""", """query""": """mid_block.attentions.0.query.bias""", """value""": """mid_block.attentions.0.value.bias""", }, """middle_block.1.qkv.weight""": { """key""": """mid_block.attentions.0.key.weight""", """query""": """mid_block.attentions.0.query.weight""", """value""": """mid_block.attentions.0.value.weight""", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , attention_paths_to_split=snake_case__ , config=snake_case__ ) for i in range(snake_case__ ): _snake_case : Optional[Any] = i // (config["""num_res_blocks"""] + 1) _snake_case : Dict = i % (config["""num_res_blocks"""] + 1) _snake_case : List[str] = [shave_segments(snake_case__ , 2 ) for name in output_blocks[i]] _snake_case : Any = {} for layer in output_block_layers: _snake_case , _snake_case : Any = layer.split(""".""" )[0], shave_segments(snake_case__ , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(snake_case__ ) else: _snake_case : str = [layer_name] if len(snake_case__ ) > 1: _snake_case : Dict = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key] _snake_case : List[str] = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key] _snake_case : List[Any] = renew_resnet_paths(snake_case__ ) _snake_case : int = 
renew_resnet_paths(snake_case__ ) _snake_case : Optional[Any] = {"""old""": F"output_blocks.{i}.0", """new""": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _snake_case : str = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] ) _snake_case : Any = checkpoint[ F"output_blocks.{i}.{index}.conv.weight" ] _snake_case : Optional[int] = checkpoint[ F"output_blocks.{i}.{index}.conv.bias" ] # Clear attentions as they have been attributed above. if len(snake_case__ ) == 2: _snake_case : Any = [] if len(snake_case__ ): _snake_case : str = renew_attention_paths(snake_case__ ) _snake_case : str = { """old""": F"output_blocks.{i}.1", """new""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}", } _snake_case : int = { F"output_blocks.{i}.1.qkv.bias": { """key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", """query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", """value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, F"output_blocks.{i}.1.qkv.weight": { """key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", """query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", """value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=snake_case__ , ) else: _snake_case : Optional[Any] = renew_resnet_paths(snake_case__ , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _snake_case : Optional[Any] = """.""".join(["""output_blocks""", str(snake_case__ ), path["""old"""]] ) _snake_case : Optional[int] = """.""".join(["""up_blocks""", str(snake_case__ ), """resnets""", str(snake_case__ ), path["""new"""]] ) _snake_case : Any = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": A_ = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') A_ = parser.parse_args() A_ = torch.load(args.checkpoint_path) with open(args.config_file) as f: A_ = json.loads(f.read()) A_ = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] A_ = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: A_ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) A_ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) A_ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
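# Usage sketch of the fused-qkv split performed by assign_to_checkpoint above.
# The sizes below (4 heads, 8 channels, feature dim 16) are invented purely for
# illustration; real checkpoints use the dims from the model config.
import torch

num_heads, channels = 4, 8
qkv = torch.randn(3 * channels, 16)  # a fused q/k/v projection tensor

# Group the three projections per attention head, then split them apart again.
grouped = qkv.reshape((num_heads, 3 * channels // num_heads) + qkv.shape[1:])
query, key, value = grouped.split(channels // num_heads, dim=1)

target_shape = (-1,) + qkv.shape[1:]
print(query.reshape(target_shape).shape)  # torch.Size([8, 16])
print(key.reshape(target_shape).shape)    # torch.Size([8, 16])
print(value.reshape(target_shape).shape)  # torch.Size([8, 16])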
"""simple docstring""" import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin A_ = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = XLMProphetNetTokenizer lowercase__ = False lowercase__ = True def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _snake_case : Tuple = XLMProphetNetTokenizer(a_, keep_accents=a_ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Tuple = """[PAD]""" _snake_case : Tuple = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ), a_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ), a_ ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0], """[PAD]""" ) self.assertEqual(vocab_keys[1], """[CLS]""" ) self.assertEqual(vocab_keys[-1], """j""" ) self.assertEqual(len(a_ ), 1_012 ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size, 1_012 ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Tuple = XLMProphetNetTokenizer(a_, keep_accents=a_ ) _snake_case : Optional[Any] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(a_, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(a_ ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) _snake_case : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( a_, [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ], ) _snake_case : int = tokenizer.convert_tokens_to_ids(a_ ) self.assertListEqual( a_, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ], ) _snake_case : str = tokenizer.convert_ids_to_tokens(a_ ) self.assertListEqual( a_, [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """[UNK]""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """[UNK]""", """.""", ], ) @cached_property def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return XLMProphetNetTokenizer.from_pretrained("""microsoft/xprophetnet-large-wiki100-cased""" ) @slow def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : int = """Hello World!""" _snake_case : Optional[int] = [35_389, 6_672, 49, 2] self.assertListEqual(a_, self.big_tokenizer.encode(a_ ) ) @slow def UpperCamelCase_ ( self: str ): '''simple 
docstring''' _snake_case : Tuple = {"""input_ids""": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=a_, model_name="""microsoft/xprophetnet-large-wiki100-cased""", revision="""1acad1643ddd54a44df6a1b797ada8373685d90e""", )
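# The assertions above add `tokenizer.fairseq_offset` to raw SentencePiece ids:
# the offset shifts piece ids upward to leave room for the model's special
# tokens. Toy illustration; the offset value below is invented, the real
# tokenizer defines its own.
fairseq_offset = 12
piece_ids = [285, 46, 10, 170, 382]  # raw SentencePiece ids for "This is a test"
model_ids = [piece_id + fairseq_offset for piece_id in piece_ids]
print(model_ids)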
"""simple docstring""" from typing import Any def UpperCAmelCase__ (snake_case__ : list ): """simple docstring""" if not input_list: return [] _snake_case : List[Any] = [input_list.count(snake_case__ ) for value in input_list] _snake_case : Optional[int] = max(snake_case__ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(snake_case__ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": A_ = argparse.ArgumentParser( description=( '''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned''' ''' Distillation''' ) ) parser.add_argument('''--model_type''', default='''bert''', choices=['''bert''']) parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str) parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str) parser.add_argument('''--vocab_transform''', action='''store_true''') A_ = parser.parse_args() if args.model_type == "bert": A_ = BertForMaskedLM.from_pretrained(args.model_name) A_ = '''bert''' else: raise ValueError('''args.model_type should be "bert".''') A_ = model.state_dict() A_ = {} for w in ["word_embeddings", "position_embeddings"]: A_ = state_dict[F'''{prefix}.embeddings.{w}.weight'''] for w in ["weight", "bias"]: A_ = state_dict[F'''{prefix}.embeddings.LayerNorm.{w}'''] A_ = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ["weight", "bias"]: A_ = state_dict[ F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}''' ] A_ = state_dict[ F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}''' ] A_ = state_dict[ F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}''' ] A_ = state_dict[ F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}''' ] A_ = state_dict[ F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}''' ] A_ = state_dict[ F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}''' ] A_ = state_dict[ F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}''' ] A_ = state_dict[ F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}''' ] std_idx += 1 A_ = state_dict['''cls.predictions.decoder.weight'''] A_ = state_dict['''cls.predictions.bias'''] if args.vocab_transform: for w in ["weight", "bias"]: A_ = state_dict[F'''cls.predictions.transform.dense.{w}'''] A_ = state_dict[F'''cls.predictions.transform.LayerNorm.{w}'''] print(F'''N layers selected for distillation: {std_idx}''') print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''') print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''') torch.save(compressed_sd, args.dump_checkpoint)
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''', '''BridgeTower/bridgetower-base-itm-mlm''': ( '''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json''' ), } class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower_vision_model" def __init__( self: Tuple, a_: str=768, a_: Union[str, Any]=12, a_: List[str]=3, a_: Optional[int]=16, a_: List[Any]=288, a_: Optional[Any]=1, a_: Any=1E-05, a_: Dict=False, a_: Any=True, a_: int=False, **a_: int, ): '''simple docstring''' super().__init__(**a_ ) _snake_case : str = hidden_size _snake_case : int = num_hidden_layers _snake_case : Any = num_channels _snake_case : Union[str, Any] = patch_size _snake_case : Dict = image_size _snake_case : Optional[Any] = initializer_factor _snake_case : Any = layer_norm_eps _snake_case : int = stop_gradient _snake_case : Any = share_layernorm _snake_case : List[Any] = remove_last_layer @classmethod def UpperCamelCase_ ( cls: Union[str, Any], a_: Union[str, os.PathLike], **a_: Optional[Any] ): '''simple docstring''' _snake_case , _snake_case : List[Any] = cls.get_config_dict(a_, **a_ ) if config_dict.get("""model_type""" ) == "bridgetower": _snake_case : str = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." 
) return cls.from_dict(a_, **a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower_text_model" def __init__( self: str, a_: Dict=50_265, a_: List[Any]=768, a_: Union[str, Any]=12, a_: List[str]=12, a_: str=1, a_: Optional[Any]=3_072, a_: int="gelu", a_: int=0.1, a_: int=0.1, a_: Optional[int]=514, a_: Tuple=1, a_: Tuple=1E-05, a_: Optional[int]=1, a_: Union[str, Any]=0, a_: str=2, a_: Any="absolute", a_: List[Any]=True, **a_: Union[str, Any], ): '''simple docstring''' super().__init__(**a_ ) _snake_case : str = vocab_size _snake_case : Optional[int] = hidden_size _snake_case : Dict = num_hidden_layers _snake_case : Optional[int] = num_attention_heads _snake_case : Optional[int] = hidden_act _snake_case : List[Any] = initializer_factor _snake_case : Optional[int] = intermediate_size _snake_case : int = hidden_dropout_prob _snake_case : Tuple = attention_probs_dropout_prob _snake_case : List[str] = max_position_embeddings _snake_case : Optional[int] = type_vocab_size _snake_case : List[Any] = layer_norm_eps _snake_case : Dict = position_embedding_type _snake_case : Dict = use_cache _snake_case : int = pad_token_id _snake_case : Union[str, Any] = bos_token_id _snake_case : Union[str, Any] = eos_token_id @classmethod def UpperCamelCase_ ( cls: str, a_: Union[str, os.PathLike], **a_: int ): '''simple docstring''' _snake_case , _snake_case : Optional[int] = cls.get_config_dict(a_, **a_ ) if config_dict.get("""model_type""" ) == "bridgetower": _snake_case : Union[str, Any] = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(a_, **a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower" def __init__( self: int, a_: List[str]=True, a_: Any="gelu", a_: List[Any]=768, a_: int=1, a_: Optional[int]=1E-05, a_: Tuple=False, a_: Optional[Any]="add", a_: List[str]=12, a_: Union[str, Any]=6, a_: int=False, a_: Any=False, a_: Dict=None, a_: Any=None, **a_: str, ): '''simple docstring''' _snake_case : str = kwargs.pop("""text_config_dict""", a_ ) _snake_case : Optional[Any] = kwargs.pop("""vision_config_dict""", a_ ) super().__init__(**a_ ) _snake_case : str = share_cross_modal_transformer_layers _snake_case : Any = hidden_act _snake_case : Union[str, Any] = hidden_size _snake_case : Union[str, Any] = initializer_factor _snake_case : Dict = layer_norm_eps _snake_case : Dict = share_link_tower_layers _snake_case : Optional[int] = link_tower_type _snake_case : Any = num_attention_heads _snake_case : int = num_hidden_layers _snake_case : int = tie_word_embeddings _snake_case : Optional[Any] = init_layernorm_from_vision_encoder if text_config is None: _snake_case : Optional[Any] = {} logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" ) if vision_config is None: _snake_case : str = {} logger.info("""`vision_config` is `None`. 
Initializing the `BridgeTowerVisionConfig` with default values.""" ) _snake_case : Any = BridgeTowerTextConfig(**a_ ) _snake_case : List[Any] = BridgeTowerVisionConfig(**a_ ) @classmethod def UpperCamelCase_ ( cls: Union[str, Any], a_: BridgeTowerTextConfig, a_: BridgeTowerVisionConfig, **a_: Optional[Any] ): '''simple docstring''' return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[int] = copy.deepcopy(self.__dict__ ) _snake_case : str = self.text_config.to_dict() _snake_case : List[str] = self.vision_config.to_dict() _snake_case : Tuple = self.__class__.model_type return output
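# Usage sketch composing the full config from its two sub-configs, using only
# default values. The class names mirror those referenced in the type
# annotations above (BridgeTowerTextConfig / BridgeTowerVisionConfig), and
# from_text_vision_configs is the composing classmethod defined above.
text_config = BridgeTowerTextConfig()
vision_config = BridgeTowerVisionConfig()
config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
print(config.to_dict()["text_config"]["vocab_size"])  # 50265 by default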
"""simple docstring""" from __future__ import annotations from typing import Any class lowercase: '''simple docstring''' def __init__( self: Union[str, Any], a_: int = 6 ): '''simple docstring''' _snake_case : Node | None = None _snake_case : Node | None = None self.create_linked_list(a_ ) def UpperCamelCase_ ( self: int, a_: int ): '''simple docstring''' _snake_case : List[Any] = Node() _snake_case : Any = current_node _snake_case : int = current_node _snake_case : Union[str, Any] = current_node for _ in range(1, a_ ): _snake_case : Optional[Any] = Node() _snake_case : str = current_node _snake_case : str = previous_node _snake_case : int = current_node _snake_case : Tuple = self.front _snake_case : str = previous_node def UpperCamelCase_ ( self: int ): '''simple docstring''' return ( self.front == self.rear and self.front is not None and self.front.data is None ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' self.check_can_perform_operation() return self.front.data if self.front else None def UpperCamelCase_ ( self: int, a_: Any ): '''simple docstring''' if self.rear is None: return self.check_is_full() if not self.is_empty(): _snake_case : Tuple = self.rear.next if self.rear: _snake_case : int = data def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' self.check_can_perform_operation() if self.rear is None or self.front is None: return None if self.front == self.rear: _snake_case : Tuple = self.front.data _snake_case : Optional[Any] = None return data _snake_case : Optional[Any] = self.front _snake_case : List[Any] = old_front.next _snake_case : Optional[Any] = old_front.data _snake_case : Optional[int] = None return data def UpperCamelCase_ ( self: Any ): '''simple docstring''' if self.is_empty(): raise Exception("""Empty Queue""" ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' if self.rear and self.rear.next == self.front: raise Exception("""Full Queue""" ) class lowercase: '''simple docstring''' def __init__( self: Tuple ): '''simple docstring''' _snake_case : Any | None = None _snake_case : Node | None = None _snake_case : Node | None = None if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def UpperCAmelCase__ (): """simple docstring""" _snake_case : Optional[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png""" _snake_case : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("""RGB""" ) return image def UpperCAmelCase__ (snake_case__ : Any ): """simple docstring""" _snake_case : str = [] # fmt: off # vision encoder rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") ) rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") ) rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") ) rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) ) rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") ) # QFormer rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") ) rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") ) # fmt: on return rename_keys def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Tuple ): """simple docstring""" _snake_case : Optional[Any] = dct.pop(snake_case__ ) _snake_case : Optional[int] = val def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : str ): """simple docstring""" for i in range(config.vision_config.num_hidden_layers ): 
# read in original q and v biases _snake_case : Optional[int] = state_dict.pop(F"visual_encoder.blocks.{i}.attn.q_bias" ) _snake_case : Tuple = state_dict.pop(F"visual_encoder.blocks.{i}.attn.v_bias" ) # next, set bias in the state dict _snake_case : List[str] = torch.cat((q_bias, torch.zeros_like(snake_case__ , requires_grad=snake_case__ ), v_bias) ) _snake_case : Dict = qkv_bias def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Union[str, Any] ): """simple docstring""" _snake_case : List[Any] = 3_64 if """coco""" in model_name else 2_24 _snake_case : List[str] = BlipaVisionConfig(image_size=snake_case__ ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: _snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=snake_case__ ).to_dict() elif "opt-6.7b" in model_name: _snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=snake_case__ ).to_dict() elif "t5-xl" in model_name: _snake_case : Tuple = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: _snake_case : List[Any] = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() _snake_case : int = BlipaConfig(vision_config=snake_case__ , text_config=snake_case__ ) return config, image_size @torch.no_grad() def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int=None , snake_case__ : str=False ): """simple docstring""" _snake_case : List[str] = ( AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" ) if """opt""" in model_name else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" ) ) _snake_case : str = tokenizer("""\n""" , add_special_tokens=snake_case__ ).input_ids[0] _snake_case , _snake_case : Dict = get_blipa_config(snake_case__ , eos_token_id=snake_case__ ) _snake_case : str = BlipaForConditionalGeneration(snake_case__ ).eval() _snake_case : int = { """blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""), """blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""), """blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""), """blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""), """blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""), """blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""), """blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""), } _snake_case , _snake_case : List[Any] = model_name_to_original[model_name] # load original model print("""Loading original model...""" ) _snake_case : int = """cuda""" if torch.cuda.is_available() else """cpu""" _snake_case , _snake_case , _snake_case : Any = load_model_and_preprocess( name=snake_case__ , model_type=snake_case__ , is_eval=snake_case__ , device=snake_case__ ) original_model.eval() print("""Done!""" ) # update state dict keys _snake_case : Any = original_model.state_dict() _snake_case : Dict = create_rename_keys(snake_case__ ) for src, dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): _snake_case : str = state_dict.pop(snake_case__ ) if key.startswith("""Qformer.bert""" ): _snake_case : str = key.replace("""Qformer.bert""" , """qformer""" ) if "attention.self" in key: _snake_case : Any = key.replace("""self""" , 
"""attention""" ) if "opt_proj" in key: _snake_case : List[str] = key.replace("""opt_proj""" , """language_projection""" ) if "t5_proj" in key: _snake_case : Optional[Any] = key.replace("""t5_proj""" , """language_projection""" ) if key.startswith("""opt""" ): _snake_case : List[Any] = key.replace("""opt""" , """language""" ) if key.startswith("""t5""" ): _snake_case : List[Any] = key.replace("""t5""" , """language""" ) _snake_case : str = val # read in qv biases read_in_q_v_bias(snake_case__ , snake_case__ ) _snake_case , _snake_case : List[str] = hf_model.load_state_dict(snake_case__ , strict=snake_case__ ) assert len(snake_case__ ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] _snake_case : Any = load_demo_image() _snake_case : str = vis_processors["""eval"""](snake_case__ ).unsqueeze(0 ).to(snake_case__ ) _snake_case : List[Any] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(snake_case__ ) # create processor _snake_case : Any = BlipImageProcessor( size={"""height""": image_size, """width""": image_size} , image_mean=snake_case__ , image_std=snake_case__ ) _snake_case : int = BlipaProcessor(image_processor=snake_case__ , tokenizer=snake_case__ ) _snake_case : Any = processor(images=snake_case__ , return_tensors="""pt""" ).pixel_values.to(snake_case__ ) # make sure processor creates exact same pixel values assert torch.allclose(snake_case__ , snake_case__ ) original_model.to(snake_case__ ) hf_model.to(snake_case__ ) with torch.no_grad(): if "opt" in model_name: _snake_case : str = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits _snake_case : int = hf_model(snake_case__ , snake_case__ ).logits else: _snake_case : str = original_model( {"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits _snake_case : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 ) _snake_case : Union[str, Any] = hf_model(snake_case__ , snake_case__ , labels=snake_case__ ).logits assert original_logits.shape == logits.shape print("""First values of original logits:""" , original_logits[0, :3, :3] ) print("""First values of HF logits:""" , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": _snake_case : List[str] = torch.tensor( [[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=snake_case__ ) assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 ) elif model_name == "blip2-flan-t5-xl-coco": _snake_case : Union[str, Any] = torch.tensor( [[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=snake_case__ ) else: # cast to same type _snake_case : int = logits.dtype assert torch.allclose(original_logits.to(snake_case__ ) , snake_case__ , atol=1e-2 ) print("""Looks ok!""" ) print("""Generating a caption...""" ) _snake_case : Any = """""" _snake_case : str = tokenizer(snake_case__ , return_tensors="""pt""" ).input_ids.to(snake_case__ ) _snake_case : Union[str, Any] = original_model.generate({"""image""": original_pixel_values} ) _snake_case : Tuple = hf_model.generate( snake_case__ , snake_case__ , do_sample=snake_case__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print("""Original generation:""" , snake_case__ ) _snake_case : Optional[Any] = input_ids.shape[1] _snake_case : int = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=snake_case__ ) _snake_case : Optional[Any] = 
[text.strip() for text in output_text] print("""HF generation:""" , snake_case__ ) if pytorch_dump_folder_path is not None: processor.save_pretrained(snake_case__ ) hf_model.save_pretrained(snake_case__ ) if push_to_hub: processor.push_to_hub(F"nielsr/{model_name}" ) hf_model.push_to_hub(F"nielsr/{model_name}" ) if __name__ == "__main__": A_ = argparse.ArgumentParser() A_ = [ '''blip2-opt-2.7b''', '''blip2-opt-6.7b''', '''blip2-opt-2.7b-coco''', '''blip2-opt-6.7b-coco''', '''blip2-flan-t5-xl''', '''blip2-flan-t5-xl-coco''', '''blip2-flan-t5-xxl''', ] parser.add_argument( '''--model_name''', default='''blip2-opt-2.7b''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) A_ = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
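# read_in_q_v_bias above relies on the original attention having biases only
# for q and v; the key slot of the fused qkv bias is filled with zeros. A
# standalone illustration with a toy width (the real hidden size is much larger):
import torch

hidden = 4
q_bias = torch.arange(hidden, dtype=torch.float32)
v_bias = -torch.arange(hidden, dtype=torch.float32)
qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
print(qkv_bias.shape)  # torch.Size([12]) == 3 * hidden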
"""simple docstring""" import argparse from collections import defaultdict def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Any ): """simple docstring""" _snake_case : Optional[int] = F"{file}_{class_name}_{test_name}" done_test[_id] += 1 with open(snake_case__ , """r""" ) as f: _snake_case : Union[str, Any] = f.readlines() _snake_case : int = F"class {class_name}(" _snake_case : Optional[int] = F"{4 * ' '}def {test_name}(" _snake_case : List[Any] = F"{8 * ' '}{correct_line.split()[0]}" _snake_case : Optional[Any] = F"{16 * ' '}{correct_line.split()[0]}" _snake_case : Union[str, Any] = False _snake_case : List[Any] = False _snake_case : Optional[Any] = False _snake_case : Union[str, Any] = False _snake_case : Dict = 0 _snake_case : Optional[int] = 0 _snake_case : List[str] = [] for line in lines: if line.startswith(snake_case__ ): _snake_case : Optional[Any] = True elif in_class and line.startswith(snake_case__ ): _snake_case : List[Any] = True elif in_class and in_func and (line.startswith(snake_case__ ) or line.startswith(snake_case__ )): _snake_case : List[Any] = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _snake_case : Union[str, Any] = True if in_class and in_func and in_line: if ")" not in line: continue else: _snake_case : Optional[int] = True if in_class and in_func and in_line and insert_line: new_lines.append(F"{spaces * ' '}{correct_line}" ) _snake_case : Optional[Any] = False else: new_lines.append(snake_case__ ) with open(snake_case__ , """w""" ) as f: for line in new_lines: f.write(snake_case__ ) def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Tuple=None ): """simple docstring""" if fail is not None: with open(snake_case__ , """r""" ) as f: _snake_case : List[str] = {l.strip() for l in f.readlines()} else: _snake_case : str = None with open(snake_case__ , """r""" ) as f: _snake_case : List[str] = f.readlines() _snake_case : Tuple = defaultdict(snake_case__ ) for line in correct_lines: _snake_case , _snake_case , _snake_case , _snake_case : List[str] = line.split(""";""" ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) if __name__ == "__main__": A_ = argparse.ArgumentParser() parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''') parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None) A_ = parser.parse_args() main(args.correct_filename, args.fail_filename)
"""simple docstring""" import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() A_ = logging.get_logger(__name__) def UpperCAmelCase__ (snake_case__ : Optional[int] ): """simple docstring""" print("""Loading config file...""" ) def flatten_yaml_as_dict(snake_case__ : List[Any] , snake_case__ : Optional[Any]="" , snake_case__ : Tuple="." ): _snake_case : Union[str, Any] = [] for k, v in d.items(): _snake_case : List[str] = parent_key + sep + k if parent_key else k if isinstance(snake_case__ , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(snake_case__ , snake_case__ , sep=snake_case__ ).items() ) else: items.append((new_key, v) ) return dict(snake_case__ ) _snake_case : Dict = argparse.Namespace() with open(snake_case__ , """r""" ) as yaml_file: try: _snake_case : List[Any] = yaml.load(snake_case__ , Loader=yaml.FullLoader ) _snake_case : Any = flatten_yaml_as_dict(snake_case__ ) for k, v in flat_cfg.items(): setattr(snake_case__ , snake_case__ , snake_case__ ) except yaml.YAMLError as exc: logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case__ , str(snake_case__ ) ) ) return config def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ): """simple docstring""" _snake_case : Dict = MobileViTVaConfig() _snake_case : Optional[int] = False # dataset if task_name.startswith("""imagenet1k_""" ): _snake_case : Dict = 10_00 if int(task_name.strip().split("""_""" )[-1] ) == 3_84: _snake_case : Union[str, Any] = 3_84 else: _snake_case : Optional[Any] = 2_56 _snake_case : str = """imagenet-1k-id2label.json""" elif task_name.startswith("""imagenet21k_to_1k_""" ): _snake_case : str = 2_10_00 if int(task_name.strip().split("""_""" )[-1] ) == 3_84: _snake_case : Dict = 3_84 else: _snake_case : Union[str, Any] = 2_56 _snake_case : Tuple = """imagenet-22k-id2label.json""" elif task_name.startswith("""ade20k_""" ): _snake_case : Tuple = 1_51 _snake_case : str = 5_12 _snake_case : List[Any] = """ade20k-id2label.json""" _snake_case : Union[str, Any] = True elif task_name.startswith("""voc_""" ): _snake_case : List[Any] = 21 _snake_case : List[str] = 5_12 _snake_case : int = """pascal-voc-id2label.json""" _snake_case : int = True # orig_config _snake_case : int = load_orig_config_file(snake_case__ ) assert getattr(snake_case__ , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model" _snake_case : str = getattr(snake_case__ , """model.classification.mitv2.width_multiplier""" , 1.0 ) assert ( getattr(snake_case__ , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" _snake_case : int = getattr(snake_case__ , """model.classification.activation.name""" , """swish""" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: _snake_case : Tuple = getattr(snake_case__ , """model.segmentation.output_stride""" , 16 ) if "_deeplabv3" in task_name: _snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] ) _snake_case : Tuple = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 ) _snake_case : Any = 
getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 ) # id2label _snake_case : Union[str, Any] = """huggingface/label-files""" _snake_case : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) ) _snake_case : List[Any] = {int(snake_case__ ): v for k, v in idalabel.items()} _snake_case : Tuple = idalabel _snake_case : Any = {v: k for k, v in idalabel.items()} return config def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[Any] ): """simple docstring""" _snake_case : List[str] = dct.pop(snake_case__ ) _snake_case : List[Any] = val def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : int=False ): """simple docstring""" if base_model: _snake_case : Any = """""" else: _snake_case : Union[str, Any] = """mobilevitv2.""" _snake_case : Dict = [] for k in state_dict.keys(): if k[:8] == "encoder.": _snake_case : List[str] = k[8:] else: _snake_case : str = k if ".block." in k: _snake_case : Optional[int] = k_new.replace(""".block.""" , """.""" ) if ".conv." in k: _snake_case : Union[str, Any] = k_new.replace(""".conv.""" , """.convolution.""" ) if ".norm." in k: _snake_case : str = k_new.replace(""".norm.""" , """.normalization.""" ) if "conv_1." in k: _snake_case : int = k_new.replace("""conv_1.""" , F"{model_prefix}conv_stem." ) for i in [1, 2]: if F"layer_{i}." in k: _snake_case : Tuple = k_new.replace(F"layer_{i}." , F"{model_prefix}encoder.layer.{i-1}.layer." ) if ".exp_1x1." in k: _snake_case : Optional[Any] = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" ) if ".red_1x1." in k: _snake_case : Optional[Any] = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" ) for i in [3, 4, 5]: if F"layer_{i}.0." in k: _snake_case : Tuple = k_new.replace(F"layer_{i}.0." , F"{model_prefix}encoder.layer.{i-1}.downsampling_layer." ) if F"layer_{i}.1.local_rep.0." in k: _snake_case : Any = k_new.replace(F"layer_{i}.1.local_rep.0." , F"{model_prefix}encoder.layer.{i-1}.conv_kxk." ) if F"layer_{i}.1.local_rep.1." in k: _snake_case : str = k_new.replace(F"layer_{i}.1.local_rep.1." , F"{model_prefix}encoder.layer.{i-1}.conv_1x1." ) for i in [3, 4, 5]: if i == 3: _snake_case : Optional[Any] = [0, 1] elif i == 4: _snake_case : Any = [0, 1, 2, 3] elif i == 5: _snake_case : List[Any] = [0, 1, 2] for j in j_in: if F"layer_{i}.1.global_rep.{j}." in k: _snake_case : Any = k_new.replace( F"layer_{i}.1.global_rep.{j}." , F"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." ) if F"layer_{i}.1.global_rep.{j+1}." in k: _snake_case : List[Any] = k_new.replace( F"layer_{i}.1.global_rep.{j+1}." , F"{model_prefix}encoder.layer.{i-1}.layernorm." ) if F"layer_{i}.1.conv_proj." in k: _snake_case : Union[str, Any] = k_new.replace(F"layer_{i}.1.conv_proj." , F"{model_prefix}encoder.layer.{i-1}.conv_projection." ) if "pre_norm_attn.0." in k: _snake_case : List[Any] = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" ) if "pre_norm_attn.1." in k: _snake_case : Optional[int] = k_new.replace("""pre_norm_attn.1.""" , """attention.""" ) if "pre_norm_ffn.0." in k: _snake_case : List[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" ) if "pre_norm_ffn.1." in k: _snake_case : Tuple = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" ) if "pre_norm_ffn.3." in k: _snake_case : Any = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" ) if "classifier.1." in k: _snake_case : List[str] = k_new.replace("""classifier.1.""" , """classifier.""" ) if "seg_head." 
in k: _snake_case : str = k_new.replace("""seg_head.""" , """segmentation_head.""" ) if ".aspp_layer." in k: _snake_case : Optional[int] = k_new.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in k: _snake_case : int = k_new.replace(""".aspp_pool.""" , """.""" ) rename_keys.append((k, k_new) ) return rename_keys def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" _snake_case : List[str] = [] for k in state_dict.keys(): if k.startswith("""seg_head.aux_head.""" ): keys_to_ignore.append(snake_case__ ) for k in keys_to_ignore: state_dict.pop(snake_case__ , snake_case__ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg""" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" _snake_case : Any = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) return im @torch.no_grad() def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Tuple ): """simple docstring""" _snake_case : int = get_mobilevitva_config(snake_case__ , snake_case__ ) # load original state_dict _snake_case : Optional[int] = torch.load(snake_case__ , map_location="""cpu""" ) # load huggingface model if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ): _snake_case : Any = MobileViTVaForSemanticSegmentation(snake_case__ ).eval() _snake_case : List[Any] = False else: _snake_case : List[Any] = MobileViTVaForImageClassification(snake_case__ ).eval() _snake_case : Optional[Any] = False # remove and rename some keys of load the original model _snake_case : Union[str, Any] = checkpoint remove_unused_keys(snake_case__ ) _snake_case : List[str] = create_rename_keys(snake_case__ , base_model=snake_case__ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) # load modified state_dict model.load_state_dict(snake_case__ ) # Check outputs on an image, prepared by MobileViTImageProcessor _snake_case : Optional[int] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) _snake_case : List[str] = image_processor(images=prepare_img() , return_tensors="""pt""" ) _snake_case : Optional[Any] = model(**snake_case__ ) # verify classification model if task_name.startswith("""imagenet""" ): _snake_case : List[str] = outputs.logits _snake_case : Any = logits.argmax(-1 ).item() print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] ) if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0: # expected_logits for base variant _snake_case : List[str] = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] ) assert torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) print(F"Saving model {task_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(snake_case__ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(snake_case__ ) if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''imagenet1k_256''', type=str, help=( '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . 
''' ''' Classification (ImageNet-1k) - MobileViTV2 (256x256) : imagenet1k_256 - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384 - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : imagenet21k_to_1k_256 - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on ImageNet-1k 384x384) : imagenet21k_to_1k_384 Segmentation - ADE20K Dataset : ade20k_deeplabv3 - Pascal VOC 2012 Dataset: voc_deeplabv3 ''' ), choices=[ '''imagenet1k_256''', '''imagenet1k_384''', '''imagenet21k_to_1k_256''', '''imagenet21k_to_1k_384''', '''ade20k_deeplabv3''', '''voc_deeplabv3''', ], ) parser.add_argument( '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) A_ = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
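# The conversion above is driven entirely by (old, new) string pairs;
# rename_key just pops the old entry and reinserts it under the new name.
# Minimal illustration with invented key names:
state_dict = {"layer_1.0.block.conv.weight": 1}
rename_keys = [("layer_1.0.block.conv.weight", "mobilevitv2.encoder.layer.0.layer.0.convolution.weight")]
for src, dest in rename_keys:
    state_dict[dest] = state_dict.pop(src)
print(state_dict)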
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ... import AutoBackbone from ...modeling_outputs import SemanticSegmenterOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...utils.backbone_utils import BackboneMixin from .configuration_upernet import UperNetConfig A_ = [ '''openmmlab/upernet-convnext-tiny''', # See all UperNet models at https://huggingface.co/models?filter=upernet ] # General docstring A_ = '''UperNetConfig''' class lowercase( nn.Module ): '''simple docstring''' def __init__( self: str, a_: int, a_: int, a_: Union[int, Tuple[int, int]], a_: Union[int, Tuple[int, int], str] = 0, a_: bool = False, a_: Union[int, Tuple[int, int]] = 1, ): '''simple docstring''' super().__init__() _snake_case : Union[str, Any] = nn.Convad( in_channels=a_, out_channels=a_, kernel_size=a_, padding=a_, bias=a_, dilation=a_, ) _snake_case : Optional[int] = nn.BatchNormad(a_ ) _snake_case : Tuple = nn.ReLU() def UpperCamelCase_ ( self: Optional[Any], a_: torch.Tensor ): '''simple docstring''' _snake_case : Union[str, Any] = self.conv(a_ ) _snake_case : Optional[int] = self.batch_norm(a_ ) _snake_case : Dict = self.activation(a_ ) return output class lowercase( nn.Module ): '''simple docstring''' def __init__( self: List[str], a_: int, a_: int, a_: int ): '''simple docstring''' super().__init__() _snake_case : List[str] = [ nn.AdaptiveAvgPoolad(a_ ), UperNetConvModule(a_, a_, kernel_size=1 ), ] for i, layer in enumerate(self.layers ): self.add_module(str(a_ ), a_ ) def UpperCamelCase_ ( self: List[Any], a_: torch.Tensor ): '''simple docstring''' _snake_case : List[Any] = input for layer in self.layers: _snake_case : str = layer(a_ ) return hidden_state class lowercase( nn.Module ): '''simple docstring''' def __init__( self: List[str], a_: Tuple[int, ...], a_: int, a_: int, a_: bool ): '''simple docstring''' super().__init__() _snake_case : List[str] = pool_scales _snake_case : Optional[Any] = align_corners _snake_case : List[Any] = in_channels _snake_case : Dict = channels _snake_case : Optional[int] = [] for i, pool_scale in enumerate(a_ ): _snake_case : Optional[int] = UperNetPyramidPoolingBlock(pool_scale=a_, in_channels=a_, channels=a_ ) self.blocks.append(a_ ) self.add_module(str(a_ ), a_ ) def UpperCamelCase_ ( self: Union[str, Any], a_: torch.Tensor ): '''simple docstring''' _snake_case : str = [] for ppm in self.blocks: _snake_case : Any = ppm(a_ ) _snake_case : Dict = nn.functional.interpolate( a_, size=x.size()[2:], mode="""bilinear""", align_corners=self.align_corners ) ppm_outs.append(a_ ) return ppm_outs class lowercase( nn.Module ): '''simple docstring''' def __init__( self: Any, a_: Any, a_: Dict ): '''simple docstring''' super().__init__() _snake_case : Tuple = config _snake_case : Tuple = config.pool_scales # e.g. 
(1, 2, 3, 6) _snake_case : str = in_channels _snake_case : Dict = config.hidden_size _snake_case : Any = False _snake_case : List[Any] = nn.Convad(self.channels, config.num_labels, kernel_size=1 ) # PSP Module _snake_case : List[Any] = UperNetPyramidPoolingModule( self.pool_scales, self.in_channels[-1], self.channels, align_corners=self.align_corners, ) _snake_case : int = UperNetConvModule( self.in_channels[-1] + len(self.pool_scales ) * self.channels, self.channels, kernel_size=3, padding=1, ) # FPN Module _snake_case : str = nn.ModuleList() _snake_case : Tuple = nn.ModuleList() for in_channels in self.in_channels[:-1]: # skip the top layer _snake_case : List[Any] = UperNetConvModule(a_, self.channels, kernel_size=1 ) _snake_case : Optional[Any] = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1 ) self.lateral_convs.append(a_ ) self.fpn_convs.append(a_ ) _snake_case : str = UperNetConvModule( len(self.in_channels ) * self.channels, self.channels, kernel_size=3, padding=1, ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' self.apply(self._init_weights ) def UpperCamelCase_ ( self: Optional[Any], a_: List[Any] ): '''simple docstring''' if isinstance(a_, nn.Convad ): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def UpperCamelCase_ ( self: List[Any], a_: Any ): '''simple docstring''' _snake_case : Optional[int] = inputs[-1] _snake_case : Tuple = [x] psp_outs.extend(self.psp_modules(a_ ) ) _snake_case : Tuple = torch.cat(a_, dim=1 ) _snake_case : Optional[Any] = self.bottleneck(a_ ) return output def UpperCamelCase_ ( self: Optional[Any], a_: torch.Tensor ): '''simple docstring''' _snake_case : int = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )] laterals.append(self.psp_forward(a_ ) ) # build top-down path _snake_case : List[str] = len(a_ ) for i in range(used_backbone_levels - 1, 0, -1 ): _snake_case : Dict = laterals[i - 1].shape[2:] _snake_case : Tuple = laterals[i - 1] + nn.functional.interpolate( laterals[i], size=a_, mode="""bilinear""", align_corners=self.align_corners ) # build outputs _snake_case : Optional[int] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )] # append psp feature fpn_outs.append(laterals[-1] ) for i in range(used_backbone_levels - 1, 0, -1 ): _snake_case : int = nn.functional.interpolate( fpn_outs[i], size=fpn_outs[0].shape[2:], mode="""bilinear""", align_corners=self.align_corners ) _snake_case : Optional[Any] = torch.cat(a_, dim=1 ) _snake_case : Any = self.fpn_bottleneck(a_ ) _snake_case : Dict = self.classifier(a_ ) return output class lowercase( nn.Module ): '''simple docstring''' def __init__( self: Dict, a_: int, a_: int = 2, a_: int = 3, a_: Union[int, Tuple[int, int]] = 1 ): '''simple docstring''' super().__init__() _snake_case : List[Any] = config _snake_case : Dict = config.auxiliary_in_channels _snake_case : Dict = config.auxiliary_channels _snake_case : int = config.auxiliary_num_convs _snake_case : Any = config.auxiliary_concat_input _snake_case : Union[str, Any] = in_index _snake_case : int = (kernel_size // 2) * dilation _snake_case : Tuple = [] convs.append( UperNetConvModule( self.in_channels, self.channels, kernel_size=a_, padding=a_, dilation=a_ ) ) for i in range(self.num_convs - 1 ): convs.append( UperNetConvModule( self.channels, self.channels, kernel_size=a_, padding=a_, dilation=a_ ) ) if self.num_convs == 0: _snake_case : str = nn.Identity() 
else: _snake_case : List[Any] = nn.Sequential(*a_ ) if self.concat_input: _snake_case : Dict = UperNetConvModule( self.in_channels + self.channels, self.channels, kernel_size=a_, padding=kernel_size // 2 ) _snake_case : Optional[Any] = nn.Convad(self.channels, config.num_labels, kernel_size=1 ) def UpperCamelCase_ ( self: int ): '''simple docstring''' self.apply(self._init_weights ) def UpperCamelCase_ ( self: Dict, a_: List[str] ): '''simple docstring''' if isinstance(a_, nn.Convad ): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def UpperCamelCase_ ( self: List[Any], a_: torch.Tensor ): '''simple docstring''' _snake_case : Any = encoder_hidden_states[self.in_index] _snake_case : Optional[Any] = self.convs(a_ ) if self.concat_input: _snake_case : Any = self.conv_cat(torch.cat([hidden_states, output], dim=1 ) ) _snake_case : str = self.classifier(a_ ) return output class lowercase( __a ): '''simple docstring''' lowercase__ = UperNetConfig lowercase__ = "pixel_values" lowercase__ = True def UpperCamelCase_ ( self: Dict, a_: Optional[int] ): '''simple docstring''' if isinstance(a_, a_ ): module.backbone.init_weights() module.decode_head.init_weights() module.auxiliary_head.init_weights() def UpperCamelCase_ ( self: str ): '''simple docstring''' self.backbone.init_weights() self.decode_head.init_weights() self.auxiliary_head.init_weights() def UpperCamelCase_ ( self: int, a_: Union[str, Any], a_: str=False ): '''simple docstring''' if isinstance(a_, a_ ): _snake_case : Union[str, Any] = value A_ = r''' Parameters: This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. config ([`UperNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' A_ = r''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." 
, __a , ) class lowercase( __a ): '''simple docstring''' def __init__( self: Union[str, Any], a_: Any ): '''simple docstring''' super().__init__(a_ ) _snake_case : Tuple = AutoBackbone.from_config(config.backbone_config ) # Semantic segmentation head(s) _snake_case : int = UperNetHead(a_, in_channels=self.backbone.channels ) _snake_case : Tuple = UperNetFCNHead(a_ ) if config.use_auxiliary_head else None # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("""batch_size, sequence_length""" ) ) @replace_return_docstrings(output_type=a_, config_class=_CONFIG_FOR_DOC ) def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[torch.Tensor] = None, a_: Optional[bool] = None, a_: Optional[bool] = None, a_: Optional[torch.Tensor] = None, a_: Optional[bool] = None, ): '''simple docstring''' _snake_case : Dict = return_dict if return_dict is not None else self.config.use_return_dict _snake_case : Optional[int] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _snake_case : Tuple = output_attentions if output_attentions is not None else self.config.output_attentions _snake_case : str = self.backbone.forward_with_filtered_kwargs( a_, output_hidden_states=a_, output_attentions=a_ ) _snake_case : Dict = outputs.feature_maps _snake_case : Optional[Any] = self.decode_head(a_ ) _snake_case : int = nn.functional.interpolate(a_, size=pixel_values.shape[2:], mode="""bilinear""", align_corners=a_ ) _snake_case : Dict = None if self.auxiliary_head is not None: _snake_case : Optional[Any] = self.auxiliary_head(a_ ) _snake_case : List[str] = nn.functional.interpolate( a_, size=pixel_values.shape[2:], mode="""bilinear""", align_corners=a_ ) _snake_case : Any = None if labels is not None: if self.config.num_labels == 1: raise ValueError("""The number of labels should be greater than one""" ) else: # compute weighted loss _snake_case : str = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index ) _snake_case : Any = loss_fct(a_, a_ ) _snake_case : List[str] = loss_fct(a_, a_ ) _snake_case : Optional[Any] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss if not return_dict: if output_hidden_states: _snake_case : List[str] = (logits,) + outputs[1:] else: _snake_case : str = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=a_, logits=a_, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
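# Hedged end-to-end usage sketch for the model defined above. The checkpoint
# name comes from the model list at the top of this file; the random tensor
# stands in for a real preprocessed image.
import torch
from transformers import UperNetForSemanticSegmentation

model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
pixel_values = torch.randn(1, 3, 512, 512)
with torch.no_grad():
    outputs = model(pixel_values=pixel_values)
print(outputs.logits.shape)  # (1, num_labels, 512, 512): logits are upsampled to the input size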
"""simple docstring""" import os import sys import unittest A_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path A_ = os.path.join(git_repo_path, '''src''', '''diffusers''') class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Optional[int] = find_backend(""" if not is_torch_available():""" ) self.assertEqual(a_, """torch""" ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") _snake_case : Any = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" ) self.assertEqual(a_, """torch_and_transformers""" ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") _snake_case : Union[str, Any] = find_backend( """ if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" ) self.assertEqual(a_, """torch_and_transformers_and_onnx""" ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Dict = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("""torch""", a_ ) self.assertIn("""torch_and_transformers""", a_ ) self.assertIn("""flax_and_transformers""", a_ ) self.assertIn("""torch_and_transformers_and_onnx""", a_ ) # Likewise, we can't assert on the exact content of a key self.assertIn("""UNet2DModel""", objects["""torch"""] ) self.assertIn("""FlaxUNet2DConditionModel""", objects["""flax"""] ) self.assertIn("""StableDiffusionPipeline""", objects["""torch_and_transformers"""] ) self.assertIn("""FlaxStableDiffusionPipeline""", objects["""flax_and_transformers"""] ) self.assertIn("""LMSDiscreteScheduler""", objects["""torch_and_scipy"""] ) self.assertIn("""OnnxStableDiffusionPipeline""", objects["""torch_and_transformers_and_onnx"""] ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[Any] = create_dummy_object("""CONSTANT""", """'torch'""" ) self.assertEqual(a_, """\nCONSTANT = None\n""" ) _snake_case : Optional[int] = create_dummy_object("""function""", """'torch'""" ) self.assertEqual( a_, """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" ) _snake_case : List[Any] = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, 'torch') @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, 'torch') """ _snake_case : Union[str, Any] = create_dummy_object("""FakeClass""", """'torch'""" ) self.assertEqual(a_, a_ ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Union[str, Any] = """# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, [\"torch\"]) class FakeClass(metaclass=DummyObject): _backends = [\"torch\"] def __init__(self, *args, **kwargs): requires_backends(self, [\"torch\"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) """ _snake_case : List[Any] = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} ) self.assertEqual(dummy_files["""torch"""], a_ )
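For context on what `create_dummy_object` generates, here is a minimal sketch of the dummy-object pattern itself, with `requires_backends` simplified to a bare `ImportError` (the real helper only raises for backends that are genuinely unavailable):

```python
def requires_backends(obj, backends):
    # Simplified stand-in for the real helper.
    name = getattr(obj, "__name__", obj.__class__.__name__)
    raise ImportError(f"{name} requires the following backends: {backends}")

class DummyObject(type):
    """Metaclass that raises on any public attribute access, so importing a
    dummy class succeeds but actually using it fails with a clear message."""
    def __getattr__(cls, key):
        if key.startswith("_"):
            raise AttributeError(key)
        requires_backends(cls, cls._backends)

class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)
```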
"""simple docstring""" import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class lowercase: '''simple docstring''' @staticmethod def UpperCamelCase_ ( *a_: Dict, **a_: Union[str, Any] ): '''simple docstring''' pass @is_pipeline_test @require_vision class lowercase( unittest.TestCase ): '''simple docstring''' @require_torch def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Tuple = pipeline( model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""", ) _snake_case : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) _snake_case : Any = image_classifier(a_, candidate_labels=["""a""", """b""", """c"""] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(a_ ), [ [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}], [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """c"""}, {"""score""": 0.333, """label""": """b"""}], ], ) _snake_case : str = image_classifier([image] * 5, candidate_labels=["""A""", """B""", """C"""], batch_size=2 ) self.assertEqual( nested_simplify(a_ ), [ [ {"""score""": 0.333, """label""": ANY(a_ )}, {"""score""": 0.333, """label""": ANY(a_ )}, {"""score""": 0.333, """label""": ANY(a_ )}, ], [ {"""score""": 0.333, """label""": ANY(a_ )}, {"""score""": 0.333, """label""": ANY(a_ )}, {"""score""": 0.333, """label""": ANY(a_ )}, ], [ {"""score""": 0.333, """label""": ANY(a_ )}, {"""score""": 0.333, """label""": ANY(a_ )}, {"""score""": 0.333, """label""": ANY(a_ )}, ], [ {"""score""": 0.333, """label""": ANY(a_ )}, {"""score""": 0.333, """label""": ANY(a_ )}, {"""score""": 0.333, """label""": ANY(a_ )}, ], [ {"""score""": 0.333, """label""": ANY(a_ )}, {"""score""": 0.333, """label""": ANY(a_ )}, {"""score""": 0.333, """label""": ANY(a_ )}, ], ], ) @require_tf def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : List[str] = pipeline( model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""", framework="""tf""" ) _snake_case : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) _snake_case : str = image_classifier(a_, candidate_labels=["""a""", """b""", """c"""] ) self.assertEqual( nested_simplify(a_ ), [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}], ) _snake_case : Any = image_classifier([image] * 5, candidate_labels=["""A""", """B""", """C"""], batch_size=2 ) self.assertEqual( nested_simplify(a_ ), [ [ {"""score""": 0.333, """label""": ANY(a_ )}, {"""score""": 0.333, """label""": ANY(a_ )}, {"""score""": 0.333, """label""": ANY(a_ )}, ], [ {"""score""": 0.333, """label""": ANY(a_ )}, {"""score""": 0.333, """label""": ANY(a_ )}, {"""score""": 0.333, """label""": ANY(a_ )}, ], [ {"""score""": 0.333, """label""": ANY(a_ )}, {"""score""": 0.333, """label""": ANY(a_ )}, {"""score""": 0.333, """label""": ANY(a_ )}, ], [ {"""score""": 0.333, """label""": ANY(a_ )}, {"""score""": 0.333, """label""": ANY(a_ )}, {"""score""": 0.333, """label""": ANY(a_ 
)}, ], [ {"""score""": 0.333, """label""": ANY(a_ )}, {"""score""": 0.333, """label""": ANY(a_ )}, {"""score""": 0.333, """label""": ANY(a_ )}, ], ], ) @slow @require_torch def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Dict = pipeline( task="""zero-shot-image-classification""", model="""openai/clip-vit-base-patch32""", ) # This is an image of 2 cats with remotes and no planes _snake_case : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) _snake_case : Any = image_classifier(a_, candidate_labels=["""cat""", """plane""", """remote"""] ) self.assertEqual( nested_simplify(a_ ), [ {"""score""": 0.511, """label""": """remote"""}, {"""score""": 0.485, """label""": """cat"""}, {"""score""": 0.004, """label""": """plane"""}, ], ) _snake_case : str = image_classifier([image] * 5, candidate_labels=["""cat""", """plane""", """remote"""], batch_size=2 ) self.assertEqual( nested_simplify(a_ ), [ [ {"""score""": 0.511, """label""": """remote"""}, {"""score""": 0.485, """label""": """cat"""}, {"""score""": 0.004, """label""": """plane"""}, ], ] * 5, ) @slow @require_tf def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : List[Any] = pipeline( task="""zero-shot-image-classification""", model="""openai/clip-vit-base-patch32""", framework="""tf""" ) # This is an image of 2 cats with remotes and no planes _snake_case : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) _snake_case : Tuple = image_classifier(a_, candidate_labels=["""cat""", """plane""", """remote"""] ) self.assertEqual( nested_simplify(a_ ), [ {"""score""": 0.511, """label""": """remote"""}, {"""score""": 0.485, """label""": """cat"""}, {"""score""": 0.004, """label""": """plane"""}, ], ) _snake_case : Any = image_classifier([image] * 5, candidate_labels=["""cat""", """plane""", """remote"""], batch_size=2 ) self.assertEqual( nested_simplify(a_ ), [ [ {"""score""": 0.511, """label""": """remote"""}, {"""score""": 0.485, """label""": """cat"""}, {"""score""": 0.004, """label""": """plane"""}, ], ] * 5, )
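Outside the test harness, the same pipeline is only a few lines. A minimal usage sketch with the `openai/clip-vit-base-patch32` checkpoint already exercised by the slow tests above (the image path is a placeholder):

```python
from PIL import Image
from transformers import pipeline

classifier = pipeline(
    task="zero-shot-image-classification",
    model="openai/clip-vit-base-patch32",
)

image = Image.open("two_cats.png")  # placeholder image file
predictions = classifier(image, candidate_labels=["cat", "plane", "remote"])
# Each prediction is {"score": float, "label": str}, sorted by descending score.
for pred in predictions:
    print(f"{pred['label']}: {pred['score']:.3f}")
```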
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) A_ = { '''configuration_owlvit''': [ '''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OwlViTConfig''', '''OwlViTOnnxConfig''', '''OwlViTTextConfig''', '''OwlViTVisionConfig''', ], '''processing_owlvit''': ['''OwlViTProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = ['''OwlViTFeatureExtractor'''] A_ = ['''OwlViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OwlViTModel''', '''OwlViTPreTrainedModel''', '''OwlViTTextModel''', '''OwlViTVisionModel''', '''OwlViTForObjectDetection''', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''', '''BridgeTower/bridgetower-base-itm-mlm''': ( '''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json''' ), } class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower_vision_model" def __init__( self: Tuple, a_: str=768, a_: Union[str, Any]=12, a_: List[str]=3, a_: Optional[int]=16, a_: List[Any]=288, a_: Optional[Any]=1, a_: Any=1E-05, a_: Dict=False, a_: Any=True, a_: int=False, **a_: int, ): '''simple docstring''' super().__init__(**a_ ) _snake_case : str = hidden_size _snake_case : int = num_hidden_layers _snake_case : Any = num_channels _snake_case : Union[str, Any] = patch_size _snake_case : Dict = image_size _snake_case : Optional[Any] = initializer_factor _snake_case : Any = layer_norm_eps _snake_case : int = stop_gradient _snake_case : Any = share_layernorm _snake_case : List[Any] = remove_last_layer @classmethod def UpperCamelCase_ ( cls: Union[str, Any], a_: Union[str, os.PathLike], **a_: Optional[Any] ): '''simple docstring''' _snake_case , _snake_case : List[Any] = cls.get_config_dict(a_, **a_ ) if config_dict.get("""model_type""" ) == "bridgetower": _snake_case : str = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." 
) return cls.from_dict(a_, **a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower_text_model" def __init__( self: str, a_: Dict=50_265, a_: List[Any]=768, a_: Union[str, Any]=12, a_: List[str]=12, a_: str=1, a_: Optional[Any]=3_072, a_: int="gelu", a_: int=0.1, a_: int=0.1, a_: Optional[int]=514, a_: Tuple=1, a_: Tuple=1E-05, a_: Optional[int]=1, a_: Union[str, Any]=0, a_: str=2, a_: Any="absolute", a_: List[Any]=True, **a_: Union[str, Any], ): '''simple docstring''' super().__init__(**a_ ) _snake_case : str = vocab_size _snake_case : Optional[int] = hidden_size _snake_case : Dict = num_hidden_layers _snake_case : Optional[int] = num_attention_heads _snake_case : Optional[int] = hidden_act _snake_case : List[Any] = initializer_factor _snake_case : Optional[int] = intermediate_size _snake_case : int = hidden_dropout_prob _snake_case : Tuple = attention_probs_dropout_prob _snake_case : List[str] = max_position_embeddings _snake_case : Optional[int] = type_vocab_size _snake_case : List[Any] = layer_norm_eps _snake_case : Dict = position_embedding_type _snake_case : Dict = use_cache _snake_case : int = pad_token_id _snake_case : Union[str, Any] = bos_token_id _snake_case : Union[str, Any] = eos_token_id @classmethod def UpperCamelCase_ ( cls: str, a_: Union[str, os.PathLike], **a_: int ): '''simple docstring''' _snake_case , _snake_case : Optional[int] = cls.get_config_dict(a_, **a_ ) if config_dict.get("""model_type""" ) == "bridgetower": _snake_case : Union[str, Any] = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(a_, **a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower" def __init__( self: int, a_: List[str]=True, a_: Any="gelu", a_: List[Any]=768, a_: int=1, a_: Optional[int]=1E-05, a_: Tuple=False, a_: Optional[Any]="add", a_: List[str]=12, a_: Union[str, Any]=6, a_: int=False, a_: Any=False, a_: Dict=None, a_: Any=None, **a_: str, ): '''simple docstring''' _snake_case : str = kwargs.pop("""text_config_dict""", a_ ) _snake_case : Optional[Any] = kwargs.pop("""vision_config_dict""", a_ ) super().__init__(**a_ ) _snake_case : str = share_cross_modal_transformer_layers _snake_case : Any = hidden_act _snake_case : Union[str, Any] = hidden_size _snake_case : Union[str, Any] = initializer_factor _snake_case : Dict = layer_norm_eps _snake_case : Dict = share_link_tower_layers _snake_case : Optional[int] = link_tower_type _snake_case : Any = num_attention_heads _snake_case : int = num_hidden_layers _snake_case : int = tie_word_embeddings _snake_case : Optional[Any] = init_layernorm_from_vision_encoder if text_config is None: _snake_case : Optional[Any] = {} logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" ) if vision_config is None: _snake_case : str = {} logger.info("""`vision_config` is `None`. 
Initializing the `BridgeTowerVisionConfig` with default values.""" ) _snake_case : Any = BridgeTowerTextConfig(**a_ ) _snake_case : List[Any] = BridgeTowerVisionConfig(**a_ ) @classmethod def UpperCamelCase_ ( cls: Union[str, Any], a_: BridgeTowerTextConfig, a_: BridgeTowerVisionConfig, **a_: Optional[Any] ): '''simple docstring''' return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[int] = copy.deepcopy(self.__dict__ ) _snake_case : str = self.text_config.to_dict() _snake_case : List[str] = self.vision_config.to_dict() _snake_case : Tuple = self.__class__.model_type return output
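A minimal sketch of composing the three configs defined above (in the upstream library the composing classmethod is named `from_text_vision_configs`; the parameter values are illustrative defaults):

```python
from transformers import (
    BridgeTowerConfig,
    BridgeTowerTextConfig,
    BridgeTowerVisionConfig,
)

text_config = BridgeTowerTextConfig(vocab_size=50_265, hidden_size=768)
vision_config = BridgeTowerVisionConfig(hidden_size=768, num_hidden_layers=12)

# Build the composite config from its two halves, then serialize it;
# to_dict() nests the sub-configs and records the model_type.
config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
config_dict = config.to_dict()
assert config_dict["text_config"]["vocab_size"] == 50_265
```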
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml A_ = logging.get_logger(__name__) def UpperCAmelCase__ (snake_case__ : bool , snake_case__ : bool ): """simple docstring""" def run_func(snake_case__ : Tuple ): @wraps(snake_case__ ) def run_in_eager_mode(*snake_case__ : str , **snake_case__ : Any ): return func(*snake_case__ , **snake_case__ ) @wraps(snake_case__ ) @tf.function(experimental_compile=snake_case__ ) def run_in_graph_mode(*snake_case__ : Any , **snake_case__ : Optional[int] ): return func(*snake_case__ , **snake_case__ ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int , snake_case__ : int ): """simple docstring""" _snake_case : List[str] = random.Random() _snake_case : Optional[int] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(snake_case__ , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class lowercase( __a ): '''simple docstring''' lowercase__ = 42 lowercase__ = 42 lowercase__ = "TensorFlow" @property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' return tf.__version__ def UpperCamelCase_ ( self: List[str], a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : List[str] = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : Optional[int] = self._prepare_inference_func(a_, a_, a_ ) return self._measure_speed(_inference ) def UpperCamelCase_ ( self: int, a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : Tuple = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : Optional[Any] = self._prepare_train_func(a_, a_, a_ ) return self._measure_speed(_train ) def UpperCamelCase_ ( self: Dict, a_: str, a_: int, a_: int ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ ) _snake_case : str = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : List[str] = self._prepare_inference_func(a_, a_, a_ ) return self._measure_memory(_inference ) def UpperCamelCase_ ( self: Tuple, a_: str, a_: int, a_: int ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ ) _snake_case : Dict = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : Optional[int] = self._prepare_train_func(a_, 
a_, a_ ) return self._measure_memory(_train ) def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : List[Any] = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _snake_case : List[Any] = ( hasattr(a_, """architectures""" ) and isinstance(config.architectures, a_ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _snake_case : str = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _snake_case : List[Any] = __import__("""transformers""", fromlist=[model_class] ) _snake_case : Dict = getattr(a_, a_ ) _snake_case : Any = model_cls(a_ ) except ImportError: raise ImportError( f"{model_class} does not exist. If you just want to test the pretrained model, you might want to" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _snake_case : Any = TF_MODEL_MAPPING[config.__class__](a_ ) # encoder-decoder has vocab size saved differently _snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size _snake_case : List[str] = random_input_ids(a_, a_, a_ ) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_decoder_forward(): return model(a_, decoder_input_ids=a_, training=a_ ) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_forward(): return model(a_, training=a_ ) _snake_case : Optional[int] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def UpperCamelCase_ ( self: Optional[int], a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : str = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _snake_case : Tuple = ( hasattr(a_, """architectures""" ) and isinstance(config.architectures, a_ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _snake_case : List[str] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _snake_case : str = __import__("""transformers""", fromlist=[model_class] ) _snake_case : Tuple = getattr(a_, a_ ) _snake_case : Any = model_cls(a_ ) except ImportError: raise ImportError( f"{model_class} does not exist. 
If you just want to test the pretrained model, you might want to" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _snake_case : Optional[Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](a_ ) # encoder-decoder has vocab size saved differently _snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size _snake_case : int = random_input_ids(a_, a_, a_ ) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_decoder_train(): _snake_case : Dict = model(a_, decoder_input_ids=a_, labels=a_, training=a_ )[0] _snake_case : str = tf.gradients(a_, model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_train(): _snake_case : Optional[Any] = model(a_, labels=a_, training=a_ )[0] _snake_case : Optional[Any] = tf.gradients(a_, model.trainable_variables ) return gradients _snake_case : int = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def UpperCamelCase_ ( self: Union[str, Any], a_: str ): '''simple docstring''' with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(a_, repeat=1, number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _snake_case : Dict = timeit.repeat( a_, repeat=self.args.repeat, number=10, ) return min(a_ ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f"Doesn't fit on GPU. {e}" ) def UpperCamelCase_ ( self: Optional[Any], a_: Callable[[], None] ): '''simple docstring''' logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) _snake_case : List[Any] = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) _snake_case : Optional[Any] = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() _snake_case : List[str] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) _snake_case : Tuple = nvml.nvmlDeviceGetMemoryInfo(a_ ) _snake_case : List[str] = meminfo.used _snake_case : Any = Memory(a_ ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) _snake_case : List[Any] = None else: _snake_case : int = measure_peak_memory_cpu(a_ ) _snake_case : List[str] = Memory(a_ ) if isinstance(a_, a_ ) else memory_bytes if self.args.trace_memory_line_by_line: _snake_case : Tuple = stop_memory_tracing(a_ ) if memory is None: _snake_case : int = summary.total else: _snake_case : int = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f"Doesn't fit on GPU. {e}" ) return "N/A", None
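The core trick in the benchmark above is the decorator that dispatches between eager and compiled execution. A condensed sketch of that pattern, using the current `jit_compile` argument rather than the older `experimental_compile` spelling in the file:

```python
import tensorflow as tf

def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    """Return the callable unchanged for eager mode, or wrap it in
    tf.function (optionally XLA-compiled) for graph mode."""
    def wrap(func):
        if do_eager_mode:
            if use_xla:
                raise ValueError("XLA requires graph mode (eager_mode=False).")
            return func  # plain Python call, easy to debug
        return tf.function(func, jit_compile=use_xla)
    return wrap

@run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
def square(x):
    return x * x

print(square(tf.constant(3.0)))  # tf.Tensor(9.0, shape=(), dtype=float32)
```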
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import BatchFeature, SpeechTaFeatureExtractor from transformers.testing_utils import require_torch from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch A_ = random.Random() def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int]=1.0 , snake_case__ : Any=None , snake_case__ : Optional[int]=None ): """simple docstring""" if rng is None: _snake_case : Any = global_rng _snake_case : Dict = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch class lowercase( unittest.TestCase ): '''simple docstring''' def __init__( self: Dict, a_: str, a_: List[str]=7, a_: str=400, a_: int=2_000, a_: Any=1, a_: Tuple=0.0, a_: Dict=16_000, a_: List[Any]=True, a_: Optional[Any]=80, a_: int=16, a_: Any=64, a_: int="hann_window", a_: int=80, a_: List[Any]=7_600, a_: Optional[Any]=1E-10, a_: Dict=True, ): '''simple docstring''' _snake_case : Union[str, Any] = parent _snake_case : Any = batch_size _snake_case : List[Any] = min_seq_length _snake_case : str = max_seq_length _snake_case : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _snake_case : Dict = feature_size _snake_case : Dict = padding_value _snake_case : Optional[int] = sampling_rate _snake_case : Any = do_normalize _snake_case : List[str] = num_mel_bins _snake_case : Union[str, Any] = hop_length _snake_case : Optional[Any] = win_length _snake_case : Any = win_function _snake_case : Optional[int] = fmin _snake_case : Union[str, Any] = fmax _snake_case : List[Any] = mel_floor _snake_case : Tuple = return_attention_mask def UpperCamelCase_ ( self: Dict ): '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "do_normalize": self.do_normalize, "num_mel_bins": self.num_mel_bins, "hop_length": self.hop_length, "win_length": self.win_length, "win_function": self.win_function, "fmin": self.fmin, "fmax": self.fmax, "mel_floor": self.mel_floor, "return_attention_mask": self.return_attention_mask, } def UpperCamelCase_ ( self: List[str], a_: int=False, a_: Any=False ): '''simple docstring''' def _flatten(a_: int ): return list(itertools.chain(*a_ ) ) if equal_length: _snake_case : Dict = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size _snake_case : Dict = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff ) ] if numpify: _snake_case : Union[str, Any] = [np.asarray(a_ ) for x in speech_inputs] return speech_inputs def UpperCamelCase_ ( self: Optional[Any], a_: Optional[Any]=False, a_: List[str]=False ): '''simple docstring''' if equal_length: _snake_case : Optional[Any] = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size _snake_case : Optional[int] = [ floats_list((x, self.num_mel_bins) ) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff ) ] if numpify: _snake_case : Optional[Any] = [np.asarray(a_ ) for x in speech_inputs] return speech_inputs @require_torch class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = 
SpeechTaFeatureExtractor def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Dict = SpeechTaFeatureExtractionTester(self ) def UpperCamelCase_ ( self: List[str], a_: List[str] ): '''simple docstring''' self.assertTrue(np.all(np.mean(a_, axis=0 ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(a_, axis=0 ) - 1 ) < 1E-3 ) ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 _snake_case : Optional[int] = [floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] _snake_case : List[str] = [np.asarray(a_ ) for speech_input in speech_inputs] # Test not batched input _snake_case : Optional[int] = feat_extract(speech_inputs[0], return_tensors="""np""" ).input_values _snake_case : Dict = feat_extract(np_speech_inputs[0], return_tensors="""np""" ).input_values self.assertTrue(np.allclose(a_, a_, atol=1E-3 ) ) # Test batched _snake_case : str = feat_extract(a_, return_tensors="""np""" ).input_values _snake_case : Any = feat_extract(a_, return_tensors="""np""" ).input_values for enc_seq_a, enc_seq_a in zip(a_, a_ ): self.assertTrue(np.allclose(a_, a_, atol=1E-3 ) ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _snake_case : List[Any] = [floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] _snake_case : int = ["""longest""", """max_length""", """do_not_pad"""] _snake_case : List[str] = [None, 1_600, None] for max_length, padding in zip(a_, a_ ): _snake_case : str = feat_extract(a_, padding=a_, max_length=a_, return_tensors="""np""" ) _snake_case : Dict = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self.assertTrue(input_values[0][800:].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_values[1][:1_000] ) self.assertTrue(input_values[0][1_000:].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_values[2][:1_200] ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _snake_case : Dict = range(800, 1_400, 200 ) _snake_case : List[Any] = [floats_list((1, x) )[0] for x in lengths] _snake_case : Tuple = ["""longest""", """max_length""", """do_not_pad"""] _snake_case : Any = [None, 1_600, None] for max_length, padding in zip(a_, a_ ): _snake_case : str = feat_extract(a_, max_length=a_, padding=a_ ) _snake_case : Optional[Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self._check_zero_mean_unit_variance(input_values[1][:1_000] ) self._check_zero_mean_unit_variance(input_values[2][:1_200] ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _snake_case : Optional[int] = [floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] _snake_case : List[str] = feat_extract( a_, truncation=a_, max_length=1_000, padding="""max_length""", return_tensors="""np""" ) _snake_case : Dict = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Dict = 
self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _snake_case : Optional[Any] = [floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] _snake_case : Tuple = feat_extract( a_, truncation=a_, max_length=1_000, padding="""longest""", return_tensors="""np""" ) _snake_case : List[str] = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1_000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1_000) ) _snake_case : str = [floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] _snake_case : int = feat_extract( a_, truncation=a_, max_length=2_000, padding="""longest""", return_tensors="""np""" ) _snake_case : List[str] = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1_000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 1_200) ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _snake_case : int = np.random.rand(100 ).astype(np.floataa ) _snake_case : Optional[int] = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: _snake_case : Optional[Any] = feature_extractor.pad([{"""input_values""": inputs}], return_tensors="""np""" ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) _snake_case : Optional[int] = feature_extractor.pad([{"""input_values""": inputs}], return_tensors="""pt""" ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 _snake_case : List[Any] = [floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] _snake_case : Tuple = [np.asarray(a_ ) for speech_input in speech_inputs] # Test feature size _snake_case : Tuple = feature_extractor(audio_target=a_, padding=a_, return_tensors="""np""" ).input_values self.assertTrue(input_values.ndim == 3 ) self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins ) # Test not batched input _snake_case : Any = feature_extractor(speech_inputs[0], return_tensors="""np""" ).input_values _snake_case : Tuple = feature_extractor(np_speech_inputs[0], return_tensors="""np""" ).input_values self.assertTrue(np.allclose(a_, a_, atol=1E-3 ) ) # Test batched _snake_case : Any = feature_extractor(a_, return_tensors="""np""" ).input_values _snake_case : Optional[Any] = feature_extractor(a_, return_tensors="""np""" ).input_values for enc_seq_a, enc_seq_a in zip(a_, a_ ): self.assertTrue(np.allclose(a_, a_, atol=1E-3 ) ) # Test 2-D numpy arrays are batched. 
_snake_case : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)] _snake_case : Dict = np.asarray(a_ ) _snake_case : List[Any] = feature_extractor(a_, return_tensors="""np""" ).input_values _snake_case : Optional[int] = feature_extractor(a_, return_tensors="""np""" ).input_values for enc_seq_a, enc_seq_a in zip(a_, a_ ): self.assertTrue(np.allclose(a_, a_, atol=1E-3 ) ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : List[Any] = self.feat_extract_tester.prepare_inputs_for_target() _snake_case : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict ) _snake_case : Dict = feat_extract.model_input_names[0] _snake_case : List[Any] = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(a_ ) == len(a_ ) for x, y in zip(a_, processed_features[input_name] ) ) ) _snake_case : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a_ ) _snake_case : Tuple = BatchFeature({input_name: speech_inputs}, tensor_type="""np""" ) _snake_case : str = processed_features[input_name] if len(batch_features_input.shape ) < 3: _snake_case : int = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a_ ) _snake_case : List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) _snake_case : List[Any] = feat_extract.model_input_names[0] _snake_case : Optional[int] = BatchFeature({input_name: speech_inputs}, tensor_type="""pt""" ) _snake_case : List[Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: _snake_case : Tuple = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : List[str] = self.feature_extraction_class(**self.feat_extract_dict ) _snake_case : str = self.feat_extract_tester.prepare_inputs_for_target() _snake_case : Dict = feat_extract.model_input_names[0] _snake_case : Optional[Any] = BatchFeature({input_name: speech_inputs} ) _snake_case : str = feat_extract.num_mel_bins # hack! _snake_case : Any = feat_extract.pad(a_, padding="""longest""", return_tensors="""np""" )[input_name] _snake_case : Optional[Any] = feat_extract.pad(a_, padding="""longest""", return_tensors="""pt""" )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Union[str, Any] = self.feat_extract_dict _snake_case : int = True _snake_case : List[str] = self.feature_extraction_class(**a_ ) _snake_case : int = self.feat_extract_tester.prepare_inputs_for_target() _snake_case : Any = [len(a_ ) for x in speech_inputs] _snake_case : Optional[int] = feat_extract.model_input_names[0] _snake_case : Tuple = BatchFeature({input_name: speech_inputs} ) _snake_case : str = feat_extract.num_mel_bins # hack! 
_snake_case : Optional[Any] = feat_extract.pad(a_, padding="""longest""", return_tensors="""np""" ) self.assertIn("""attention_mask""", a_ ) self.assertListEqual(list(processed.attention_mask.shape ), list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist(), a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : int = self.feat_extract_dict _snake_case : int = True _snake_case : Dict = self.feature_extraction_class(**a_ ) _snake_case : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target() _snake_case : Optional[int] = [len(a_ ) for x in speech_inputs] _snake_case : Optional[int] = feat_extract.model_input_names[0] _snake_case : Dict = BatchFeature({input_name: speech_inputs} ) _snake_case : Tuple = min(a_ ) _snake_case : Optional[Any] = feat_extract.num_mel_bins # hack! _snake_case : str = feat_extract.pad( a_, padding="""max_length""", max_length=a_, truncation=a_, return_tensors="""np""" ) self.assertIn("""attention_mask""", a_ ) self.assertListEqual( list(processed_pad.attention_mask.shape ), [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist(), [max_length for x in speech_inputs] ) def UpperCamelCase_ ( self: Dict, a_: Any ): '''simple docstring''' from datasets import load_dataset _snake_case : Optional[int] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""", """clean""", split="""validation""" ) # automatic decoding with librispeech _snake_case : int = ds.sort("""id""" ).select(range(a_ ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Union[str, Any] = torch.tensor( [2.38_04E-03, 2.07_52E-03, 1.98_36E-03, 2.10_57E-03, 1.61_74E-03, 3.05_18E-04, 9.15_53E-05, 3.35_69E-04, 9.76_56E-04, 1.83_11E-03, 2.01_42E-03, 2.10_57E-03, 1.73_95E-03, 4.57_76E-04, -3.96_73E-04, 4.57_76E-04, 1.00_71E-03, 9.15_53E-05, 4.88_28E-04, 1.15_97E-03, 7.32_42E-04, 9.46_04E-04, 1.80_05E-03, 1.83_11E-03, 8.85_01E-04, 4.27_25E-04, 4.88_28E-04, 7.32_42E-04, 1.09_86E-03, 2.10_57E-03] ) # fmt: on _snake_case : List[str] = self._load_datasamples(1 ) _snake_case : Tuple = SpeechTaFeatureExtractor() _snake_case : Dict = feature_extractor(a_, return_tensors="""pt""" ).input_values self.assertEquals(input_values.shape, (1, 93_680) ) self.assertTrue(torch.allclose(input_values[0, :30], a_, atol=1E-6 ) ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[int] = torch.tensor( [-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777, -3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386, -3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571, -3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] ) # fmt: on _snake_case : Tuple = self._load_datasamples(1 ) _snake_case : Dict = SpeechTaFeatureExtractor() _snake_case : str = feature_extractor(audio_target=a_, return_tensors="""pt""" ).input_values self.assertEquals(input_values.shape, (1, 366, 80) ) self.assertTrue(torch.allclose(input_values[0, 0, :30], a_, atol=1E-4 ) )
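A minimal usage sketch of the extractor under test (`SpeechTaFeatureExtractor` here corresponds to `SpeechT5FeatureExtractor` in the upstream library; the random waveform stands in for real audio):

```python
import numpy as np
from transformers import SpeechT5FeatureExtractor

extractor = SpeechT5FeatureExtractor()
waveform = np.random.randn(16_000).astype(np.float32)  # ~1 s of fake 16 kHz audio

# Waveform input: normalized to zero mean / unit variance.
inputs = extractor(waveform, sampling_rate=16_000, return_tensors="np")
print(inputs.input_values.shape)  # (1, 16000)

# Target input: log-mel spectrogram with num_mel_bins (80) channels.
targets = extractor(audio_target=waveform, sampling_rate=16_000, return_tensors="np")
print(targets.input_values.shape)  # (1, n_frames, 80)
```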
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : str ): """simple docstring""" _snake_case : str = int(snake_case__ ) # Initialize Result _snake_case : str = [] # Traverse through all denomination for denomination in reversed(snake_case__ ): # Find denominations while int(snake_case__ ) >= int(snake_case__ ): total_value -= int(snake_case__ ) answer.append(snake_case__ ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": A_ = [] A_ = '''0''' if ( input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower() == "y" ): A_ = int(input('''Enter the number of denominations you want to add: ''').strip()) for i in range(0, n): denominations.append(int(input(F'''Denomination {i}: ''').strip())) A_ = input('''Enter the change you want to make in Indian Currency: ''').strip() else: # All denominations of Indian Currency if user does not enter A_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00] A_ = input('''Enter the change you want to make: ''').strip() if int(value) == 0 or int(value) < 0: print('''The total value cannot be zero or negative.''') else: print(F'''Following is minimal change for {value}: ''') A_ = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=''' ''')
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class lowercase: '''simple docstring''' def __init__( self: Optional[Any], a_: Optional[Any], a_: Optional[int]=13, a_: Dict=7, a_: Optional[Any]=True, a_: Any=True, a_: Any=True, a_: Any=99, a_: List[str]=32, a_: Any=5, a_: Optional[Any]=4, a_: Dict=37, a_: List[str]="gelu", a_: Any=0.1, a_: List[Any]=0.1, a_: Any=512, a_: int=16, a_: Dict=2, a_: Dict=0.02, a_: Any=3, a_: Optional[int]=4, a_: Tuple=None, ): '''simple docstring''' _snake_case : str = parent _snake_case : Dict = batch_size _snake_case : Tuple = seq_length _snake_case : Any = is_training _snake_case : Union[str, Any] = use_token_type_ids _snake_case : Tuple = use_labels _snake_case : str = vocab_size _snake_case : Any = hidden_size _snake_case : List[str] = num_hidden_layers _snake_case : Tuple = num_attention_heads _snake_case : List[str] = intermediate_size _snake_case : Union[str, Any] = hidden_act _snake_case : Optional[int] = hidden_dropout_prob _snake_case : Optional[int] = attention_probs_dropout_prob _snake_case : int = max_position_embeddings _snake_case : str = type_vocab_size _snake_case : Tuple = type_sequence_label_size _snake_case : Dict = initializer_range _snake_case : Dict = num_labels _snake_case : int = num_choices _snake_case : str = scope _snake_case : Tuple = self.vocab_size - 1 def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) _snake_case : Any = None if self.use_token_type_ids: _snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) _snake_case : Dict = None _snake_case : Dict = None _snake_case : int = None if self.use_labels: _snake_case : List[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size ) _snake_case : str = ids_tensor([self.batch_size, self.seq_length], self.num_labels ) _snake_case : Tuple = ids_tensor([self.batch_size], self.num_choices ) _snake_case : List[Any] = OpenAIGPTConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, ) _snake_case : List[Any] = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def UpperCamelCase_ ( self: Optional[Any], a_: Tuple, a_: str, a_: Optional[int], a_: List[Any], *a_: str ): '''simple docstring''' _snake_case : List[str] = OpenAIGPTModel(config=a_ ) model.to(a_ ) model.eval() _snake_case : Tuple = model(a_, token_type_ids=a_, head_mask=a_ ) _snake_case : Union[str, Any] = model(a_, token_type_ids=a_ ) _snake_case : List[str] = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self: List[Any], a_: str, a_: Tuple, a_: List[Any], a_: Tuple, *a_: 
Union[str, Any] ): '''simple docstring''' _snake_case : Dict = OpenAIGPTLMHeadModel(a_ ) model.to(a_ ) model.eval() _snake_case : Optional[Any] = model(a_, token_type_ids=a_, labels=a_ ) self.parent.assertEqual(result.loss.shape, () ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase_ ( self: str, a_: Any, a_: int, a_: Dict, a_: List[str], *a_: Union[str, Any] ): '''simple docstring''' _snake_case : List[Any] = OpenAIGPTDoubleHeadsModel(a_ ) model.to(a_ ) model.eval() _snake_case : Dict = model(a_, token_type_ids=a_, labels=a_ ) self.parent.assertEqual(result.loss.shape, () ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase_ ( self: List[Any], a_: int, a_: str, a_: Optional[Any], a_: Dict, *a_: Optional[int] ): '''simple docstring''' _snake_case : int = self.num_labels _snake_case : Union[str, Any] = OpenAIGPTForSequenceClassification(a_ ) model.to(a_ ) model.eval() _snake_case : Union[str, Any] = ids_tensor([self.batch_size], self.type_sequence_label_size ) _snake_case : str = model(a_, token_type_ids=a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Union[str, Any] = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : List[str] = config_and_inputs _snake_case : List[str] = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask, } return config, inputs_dict @require_torch class lowercase( __a , __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) lowercase__ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly lowercase__ = ( { "feature-extraction": OpenAIGPTModel, "text-classification": OpenAIGPTForSequenceClassification, "text-generation": OpenAIGPTLMHeadModel, "zero-shot": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def UpperCamelCase_ ( self: str, a_: str, a_: Dict, a_: List[str], a_: str, a_: Tuple ): '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def UpperCamelCase_ ( self: Any, a_: str, a_: Optional[int], a_: Optional[int]=False ): '''simple docstring''' _snake_case : Tuple = super()._prepare_for_class(a_, a_, return_labels=a_ ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": _snake_case : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=a_, ) _snake_case : int = inputs_dict["""labels"""] _snake_case : str = inputs_dict["""labels"""] _snake_case : List[str] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=a_, ) _snake_case : Optional[int] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=a_ ) return inputs_dict def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : List[str] = OpenAIGPTModelTester(self ) _snake_case : str = ConfigTester(self, config_class=a_, n_embd=37 ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*a_ ) def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*a_ ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*a_ ) def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*a_ ) @slow def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : str = OpenAIGPTModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @require_torch class lowercase( unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : List[Any] = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" ) model.to(a_ ) _snake_case : Optional[Any] = torch.tensor([[481, 4_735, 544]], dtype=torch.long, device=a_ ) # the president is _snake_case : List[Any] = [ 481, 4_735, 544, 246, 963, 870, 762, 239, 244, 40_477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the _snake_case : str = model.generate(a_, do_sample=a_ ) self.assertListEqual(output_ids[0].tolist(), a_ )
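The integration test above hard-codes the expected token ids; the equivalent end-to-end call with the tokenizer looks like this (a sketch, assuming the `openai-gpt` checkpoint is available locally or from the Hub):

```python
import torch
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")

input_ids = tokenizer("the president is", return_tensors="pt").input_ids
# Greedy decoding, mirroring do_sample=False in the test.
output_ids = model.generate(input_ids, do_sample=False, max_new_tokens=17)
print(tokenizer.decode(output_ids[0]))
```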
"""simple docstring""" import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class lowercase: '''simple docstring''' def __init__( self: Optional[Any], a_: Union[str, Any], a_: int=100, a_: int=13, a_: List[Any]=30, a_: str=2, a_: Optional[Any]=3, a_: Optional[int]=True, a_: Any=True, a_: Optional[Any]=32, a_: Tuple=4, a_: str=4, a_: List[Any]=37, a_: List[str]="gelu", a_: str=0.1, a_: Optional[int]=0.1, a_: Any=10, a_: List[str]=0.02, a_: Dict=3, a_: str=None, a_: Optional[int]=[0, 1, 2, 3], ): '''simple docstring''' _snake_case : Optional[int] = parent _snake_case : Optional[Any] = 100 _snake_case : Any = batch_size _snake_case : List[Any] = image_size _snake_case : Optional[Any] = patch_size _snake_case : str = num_channels _snake_case : Tuple = is_training _snake_case : Tuple = use_labels _snake_case : Any = hidden_size _snake_case : Optional[int] = num_hidden_layers _snake_case : List[str] = num_attention_heads _snake_case : Union[str, Any] = intermediate_size _snake_case : Dict = hidden_act _snake_case : str = hidden_dropout_prob _snake_case : Optional[int] = attention_probs_dropout_prob _snake_case : Optional[Any] = type_sequence_label_size _snake_case : Any = initializer_range _snake_case : List[str] = scope _snake_case : int = out_indices _snake_case : Optional[Any] = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _snake_case : Dict = (image_size // patch_size) ** 2 _snake_case : str = num_patches + 1 def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : List[Any] = None _snake_case : Tuple = None if self.use_labels: _snake_case : str = ids_tensor([self.batch_size], self.type_sequence_label_size ) _snake_case : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) _snake_case : List[str] = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return BeitConfig( vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=a_, initializer_range=self.initializer_range, out_indices=self.out_indices, ) def UpperCamelCase_ ( self: 
List[Any], a_: List[Any], a_: Any, a_: Optional[Any], a_: List[str] ): '''simple docstring''' _snake_case : str = BeitModel(config=a_ ) model.to(a_ ) model.eval() _snake_case : Dict = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self: str, a_: List[Any], a_: Optional[Any], a_: Optional[int], a_: List[Any] ): '''simple docstring''' _snake_case : List[str] = BeitForMaskedImageModeling(config=a_ ) model.to(a_ ) model.eval() _snake_case : Union[str, Any] = model(a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) ) def UpperCamelCase_ ( self: Any, a_: List[str], a_: Any, a_: List[Any], a_: Optional[Any] ): '''simple docstring''' _snake_case : Any = self.type_sequence_label_size _snake_case : Any = BeitForImageClassification(a_ ) model.to(a_ ) model.eval() _snake_case : List[Any] = model(a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _snake_case : Any = 1 _snake_case : str = BeitForImageClassification(a_ ) model.to(a_ ) model.eval() _snake_case : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _snake_case : Optional[Any] = model(a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase_ ( self: List[Any], a_: Optional[int], a_: List[Any], a_: str, a_: int ): '''simple docstring''' _snake_case : List[str] = self.num_labels _snake_case : List[Any] = BeitForSemanticSegmentation(a_ ) model.to(a_ ) model.eval() _snake_case : List[str] = model(a_ ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) _snake_case : str = model(a_, labels=a_ ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Tuple = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case , _snake_case : Any = config_and_inputs _snake_case : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) lowercase__ = ( { "feature-extraction": BeitModel, "image-classification": BeitForImageClassification, "image-segmentation": BeitForSemanticSegmentation, } if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Dict = BeitModelTester(self ) _snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 ) def UpperCamelCase_ ( self: str ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""BEiT does not use inputs_embeds""" ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def UpperCamelCase_ ( self: str ): '''simple docstring''' pass def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case , _snake_case : Any = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : List[str] = model_class(a_ ) self.assertIsInstance(model.get_input_embeddings(), (nn.Module) ) _snake_case : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a_, nn.Linear ) ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Any = model_class(a_ ) _snake_case : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : List[Any] = [*signature.parameters.keys()] _snake_case : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1], a_ ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' if not self.model_tester.is_training: return _snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Any = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(a_ ), BeitForMaskedImageModeling]: continue _snake_case : List[Any] = model_class(a_ ) model.to(a_ ) model.train() _snake_case : Dict = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : List[Any] = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return _snake_case : Dict = False _snake_case : Optional[Any] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(a_ ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue _snake_case : Any = model_class(a_ ) model.gradient_checkpointing_enable() model.to(a_ ) model.train() _snake_case : Any = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : int = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : int = _config_zero_init(a_ ) for model_class in self.all_model_classes: _snake_case : Tuple = model_class(config=a_ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems 
not properly initialized", ) @slow def UpperCamelCase_ ( self: int ): '''simple docstring''' for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : Optional[int] = BeitModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowercase( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self: Dict ): '''simple docstring''' return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None @slow def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : str = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(a_ ) _snake_case : Dict = self.default_image_processor _snake_case : Dict = prepare_img() _snake_case : List[str] = image_processor(images=a_, return_tensors="""pt""" ).pixel_values.to(a_ ) # prepare bool_masked_pos _snake_case : Optional[int] = torch.ones((1, 196), dtype=torch.bool ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : int = model(pixel_values=a_, bool_masked_pos=a_ ) _snake_case : Dict = outputs.logits # verify the logits _snake_case : Optional[int] = torch.Size((1, 196, 8_192) ) self.assertEqual(logits.shape, a_ ) _snake_case : Optional[Any] = torch.tensor( [[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(a_ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], a_, atol=1E-2 ) ) @slow def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Dict = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(a_ ) _snake_case : List[Any] = self.default_image_processor _snake_case : Any = prepare_img() _snake_case : Any = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : int = model(**a_ ) _snake_case : Optional[int] = outputs.logits # verify the logits _snake_case : Tuple = torch.Size((1, 1_000) ) self.assertEqual(logits.shape, a_ ) _snake_case : Any = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(a_ ) self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) ) _snake_case : str = 281 self.assertEqual(logits.argmax(-1 ).item(), a_ ) @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : int = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to( a_ ) _snake_case : int = self.default_image_processor _snake_case : Optional[Any] = prepare_img() _snake_case : Union[str, Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Union[str, Any] = model(**a_ ) _snake_case : Dict = outputs.logits # verify the logits _snake_case : Tuple = torch.Size((1, 21_841) ) self.assertEqual(logits.shape, a_ ) _snake_case : Optional[int] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(a_ ) self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) ) _snake_case : List[str] = 2_396 self.assertEqual(logits.argmax(-1 ).item(), a_ ) @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : List[str] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" ) _snake_case : int = model.to(a_ ) _snake_case : List[str] = BeitImageProcessor(do_resize=a_, size=640, 
do_center_crop=a_ ) _snake_case : Optional[int] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" ) _snake_case : Union[str, Any] = Image.open(ds[0]["""file"""] ) _snake_case : List[Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Optional[int] = model(**a_ ) _snake_case : Union[str, Any] = outputs.logits # verify the logits _snake_case : List[str] = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape, a_ ) _snake_case : Optional[int] = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" ) if is_pillow_less_than_a: _snake_case : Any = torch.tensor( [ [[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]], [[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]], [[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]], ], device=a_, ) else: _snake_case : Optional[Any] = torch.tensor( [ [[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]], [[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]], [[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]], ], device=a_, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], a_, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : int = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" ) _snake_case : List[Any] = model.to(a_ ) _snake_case : Tuple = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ ) _snake_case : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" ) _snake_case : str = Image.open(ds[0]["""file"""] ) _snake_case : Tuple = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Optional[int] = model(**a_ ) _snake_case : Union[str, Any] = outputs.logits.detach().cpu() _snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=a_, target_sizes=[(500, 300)] ) _snake_case : Optional[int] = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape, a_ ) _snake_case : List[str] = image_processor.post_process_semantic_segmentation(outputs=a_ ) _snake_case : List[str] = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape, a_ )
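# The suite above pins BeitForImageClassification logits against known values.
# A minimal standalone inference sketch of the same call pattern; the model
# name and classes come straight from the tests, while the image path is a
# placeholder for any local file.
import torch
from PIL import Image
from transformers import BeitForImageClassification, BeitImageProcessor

processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
model.eval()

image = Image.open("path/to/image.png")  # placeholder path
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000), as asserted above
print(model.config.id2label[logits.argmax(-1).item()])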
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): A_ = '''pt''' elif is_tf_available(): A_ = '''tf''' else: A_ = '''jax''' class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ByTaTokenizer lowercase__ = False def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' super().setUp() _snake_case : List[str] = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' return ByTaTokenizer.from_pretrained("""google/byt5-small""" ) def UpperCamelCase_ ( self: List[Any], **a_: int ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname, **a_ ) def UpperCamelCase_ ( self: Optional[Any], a_: Optional[Any], a_: List[Any]=False, a_: int=20, a_: Union[str, Any]=5 ): '''simple docstring''' _snake_case : List[Any] = [] for i in range(len(a_ ) ): try: _snake_case : Optional[Any] = tokenizer.decode([i], clean_up_tokenization_spaces=a_ ) except UnicodeDecodeError: pass toks.append((i, tok) ) _snake_case : str = list(filter(lambda a_ : re.match(r"""^[ a-zA-Z]+$""", t[1] ), a_ ) ) _snake_case : List[Any] = list(filter(lambda a_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=a_ ), a_ ) ) if max_length is not None and len(a_ ) > max_length: _snake_case : Tuple = toks[:max_length] if min_length is not None and len(a_ ) < min_length and len(a_ ) > 0: while len(a_ ) < min_length: _snake_case : List[str] = toks + toks # toks_str = [t[1] for t in toks] _snake_case : Tuple = [t[0] for t in toks] # Ensure consistency _snake_case : Optional[Any] = tokenizer.decode(a_, clean_up_tokenization_spaces=a_ ) if " " not in output_txt and len(a_ ) > 1: _snake_case : Dict = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=a_ ) + """ """ + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=a_ ) ) if with_prefix_space: _snake_case : Union[str, Any] = """ """ + output_txt _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ ) return output_txt, output_ids def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[int] = self.ta_base_tokenizer _snake_case : Optional[Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] ) _snake_case : int = tokenizer(["""hi""", """I went to the gym""", """"""] ) self.assertListEqual(batch_with_eos_added["""input_ids"""], batch_without_eos_added["""input_ids"""] ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[str] = self.ta_base_tokenizer _snake_case : Tuple = """Unicode €.""" _snake_case : List[Any] = tokenizer(a_ ) _snake_case : Tuple = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded["""input_ids"""], a_ ) # decoding _snake_case : Tuple = tokenizer.decode(a_ ) self.assertEqual(a_, """Unicode €.</s>""" ) _snake_case : Tuple = tokenizer("""e è é ê ë""" ) _snake_case : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded["""input_ids"""], a_ ) # decoding _snake_case : int = tokenizer.decode(a_ ) self.assertEqual(a_, """e è é ê ë</s>""" ) # encode/decode, but with `encode` instead of `__call__` 
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ), """e è é ê ë</s>""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Dict = self.ta_base_tokenizer _snake_case : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] # fmt: off _snake_case : Union[str, Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on _snake_case : int = tokenizer(a_, padding=a_, return_tensors=a_ ) self.assertIsInstance(a_, a_ ) if FRAMEWORK != "jax": _snake_case : List[str] = list(batch.input_ids.numpy()[0] ) else: _snake_case : Optional[int] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(a_, a_ ) self.assertEqual((2, 37), batch.input_ids.shape ) self.assertEqual((2, 37), batch.attention_mask.shape ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : List[Any] = self.ta_base_tokenizer _snake_case : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] _snake_case : Tuple = tokenizer(a_, padding=a_, return_tensors=a_ ) # check if input_ids are returned and no decoder_input_ids self.assertIn("""input_ids""", a_ ) self.assertIn("""attention_mask""", a_ ) self.assertNotIn("""decoder_input_ids""", a_ ) self.assertNotIn("""decoder_attention_mask""", a_ ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Union[str, Any] = self.ta_base_tokenizer _snake_case : Dict = [ """Summary of the text.""", """Another summary.""", ] _snake_case : Optional[int] = tokenizer( text_target=a_, max_length=32, padding="""max_length""", truncation=a_, return_tensors=a_ ) self.assertEqual(32, targets["""input_ids"""].shape[1] ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : int = self.ta_base_tokenizer _snake_case : Optional[int] = ["""A long paragraph for summarization. </s>"""] _snake_case : Dict = ["""Summary of the text. 
</s>"""] # fmt: off _snake_case : Optional[int] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] _snake_case : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on _snake_case : Optional[Any] = tokenizer(a_, text_target=a_ ) self.assertEqual(a_, batch["""input_ids"""][0] ) self.assertEqual(a_, batch["""labels"""][0] ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): self.assertNotEqual(tokenizer.model_max_length, 42 ) # Now let's start the test _snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc _snake_case : List[str] = tempfile.mkdtemp() _snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running""" _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ ) tokenizer.save_pretrained(a_ ) _snake_case : List[Any] = tokenizer.__class__.from_pretrained(a_ ) _snake_case : Dict = after_tokenizer.encode(a_, add_special_tokens=a_ ) self.assertListEqual(a_, a_ ) shutil.rmtree(a_ ) _snake_case : Tuple = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc _snake_case : Union[str, Any] = tempfile.mkdtemp() _snake_case : List[Any] = """ He is very happy, UNwant\u00E9d,running""" tokenizer.add_tokens(["""bim""", """bambam"""] ) _snake_case : Optional[Any] = tokenizer.additional_special_tokens additional_special_tokens.append("""new_additional_special_token""" ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ ) tokenizer.save_pretrained(a_ ) _snake_case : Optional[Any] = tokenizer.__class__.from_pretrained(a_ ) _snake_case : str = after_tokenizer.encode(a_, add_special_tokens=a_ ) self.assertListEqual(a_, a_ ) self.assertIn("""new_additional_special_token""", after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length, 42 ) _snake_case : Optional[int] = tokenizer.__class__.from_pretrained(a_, model_max_length=43 ) self.assertEqual(tokenizer.model_max_length, 43 ) shutil.rmtree(a_ ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Optional[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(a_ ) with open(os.path.join(a_, """special_tokens_map.json""" ), encoding="""utf-8""" ) as json_file: _snake_case : Union[str, Any] = json.load(a_ ) with open(os.path.join(a_, """tokenizer_config.json""" ), encoding="""utf-8""" ) as json_file: _snake_case : List[Any] = json.load(a_ ) _snake_case : int = [f"<extra_id_{i}>" for i in range(125 )] _snake_case : Optional[int] = added_tokens_extra_ids + [ """an_additional_special_token""" ] _snake_case : Dict = added_tokens_extra_ids + [ 
"""an_additional_special_token""" ] with open(os.path.join(a_, """special_tokens_map.json""" ), """w""", encoding="""utf-8""" ) as outfile: json.dump(a_, a_ ) with open(os.path.join(a_, """tokenizer_config.json""" ), """w""", encoding="""utf-8""" ) as outfile: json.dump(a_, a_ ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _snake_case : Optional[int] = tokenizer_class.from_pretrained( a_, ) self.assertIn( """an_additional_special_token""", tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ["""an_additional_special_token"""], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _snake_case : Union[str, Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""", lstrip=a_ )] _snake_case : List[Any] = tokenizer_class.from_pretrained( a_, additional_special_tokens=a_, ) self.assertIn("""a_new_additional_special_token""", tokenizer.additional_special_tokens ) self.assertEqual( ["""a_new_additional_special_token"""], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ), ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : List[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(a_ ) _snake_case : Optional[Any] = tokenizer_class.from_pretrained(a_ ) self.assertTrue(tokenizer.decode([255] ) == """""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' pass def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[Any] = self.get_tokenizers(fast=a_, do_lower_case=a_ ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): _snake_case : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""] _snake_case : List[Any] = tokenizer.convert_tokens_to_string(a_ ) self.assertIsInstance(a_, a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): _snake_case : Optional[int] = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] _snake_case : Any = 0 _snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens( a_, skip_special_tokens=a_ ) for attr in attributes_list: setattr(a_, attr + """_id""", a_ ) self.assertEqual(getattr(a_, a_ ), a_ ) self.assertEqual(getattr(a_, attr + """_id""" ), a_ ) setattr(a_, attr + """_id""", a_ ) 
self.assertEqual(getattr(a_, a_ ), a_ ) self.assertEqual(getattr(a_, attr + """_id""" ), a_ ) setattr(a_, """additional_special_tokens_ids""", [] ) self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [] ) self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [] ) setattr(a_, """additional_special_tokens_ids""", [token_id_to_test_setters] ) self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [token_to_test_setters] ) self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [token_id_to_test_setters] )
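# The expected ids in the tests above follow ByT5's byte-level scheme: each
# UTF-8 byte b maps to id b + 3 (ids 0-2 are reserved for special tokens) and
# id 1 (</s>) is appended. A sketch reproducing the "Unicode €." expectation
# without instantiating the tokenizer:
text = "Unicode €."
ids = [b + 3 for b in text.encode("utf-8")] + [1]  # byte + 3 offset, then </s>
assert ids == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]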
"""simple docstring""" import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class lowercase( __a ): '''simple docstring''' lowercase__ = (IPNDMScheduler,) lowercase__ = (("num_inference_steps", 50),) def UpperCamelCase_ ( self: Union[str, Any], **a_: Union[str, Any] ): '''simple docstring''' _snake_case : List[Any] = {"""num_train_timesteps""": 1_000} config.update(**a_ ) return config def UpperCamelCase_ ( self: Tuple, a_: Optional[int]=0, **a_: int ): '''simple docstring''' _snake_case : Optional[int] = dict(self.forward_default_kwargs ) _snake_case : Optional[Any] = kwargs.pop("""num_inference_steps""", a_ ) _snake_case : Optional[Any] = self.dummy_sample _snake_case : Dict = 0.1 * sample _snake_case : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: _snake_case : int = self.get_scheduler_config(**a_ ) _snake_case : Dict = scheduler_class(**a_ ) scheduler.set_timesteps(a_ ) # copy over dummy past residuals _snake_case : int = dummy_past_residuals[:] if time_step is None: _snake_case : Union[str, Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a_ ) _snake_case : Tuple = scheduler_class.from_pretrained(a_ ) new_scheduler.set_timesteps(a_ ) # copy over dummy past residuals _snake_case : Optional[Any] = dummy_past_residuals[:] _snake_case : List[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : str = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _snake_case : Optional[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : Optional[int] = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' pass def UpperCamelCase_ ( self: str, a_: Any=0, **a_: Tuple ): '''simple docstring''' _snake_case : str = dict(self.forward_default_kwargs ) _snake_case : List[Any] = kwargs.pop("""num_inference_steps""", a_ ) _snake_case : Optional[int] = self.dummy_sample _snake_case : Tuple = 0.1 * sample _snake_case : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: _snake_case : Any = self.get_scheduler_config() _snake_case : Tuple = scheduler_class(**a_ ) scheduler.set_timesteps(a_ ) # copy over dummy past residuals (must be after setting timesteps) _snake_case : Union[str, Any] = dummy_past_residuals[:] if time_step is None: _snake_case : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a_ ) _snake_case : List[str] = scheduler_class.from_pretrained(a_ ) # copy over dummy past residuals new_scheduler.set_timesteps(a_ ) # copy over dummy past residual (must be after setting timesteps) _snake_case : List[str] = dummy_past_residuals[:] _snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : Any = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : int = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, 
"Scheduler outputs are not identical" def UpperCamelCase_ ( self: List[Any], **a_: Optional[int] ): '''simple docstring''' _snake_case : Union[str, Any] = self.scheduler_classes[0] _snake_case : Any = self.get_scheduler_config(**a_ ) _snake_case : List[Any] = scheduler_class(**a_ ) _snake_case : Union[str, Any] = 10 _snake_case : Union[str, Any] = self.dummy_model() _snake_case : List[Any] = self.dummy_sample_deter scheduler.set_timesteps(a_ ) for i, t in enumerate(scheduler.timesteps ): _snake_case : Optional[Any] = model(a_, a_ ) _snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample for i, t in enumerate(scheduler.timesteps ): _snake_case : Union[str, Any] = model(a_, a_ ) _snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample return sample def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : str = dict(self.forward_default_kwargs ) _snake_case : int = kwargs.pop("""num_inference_steps""", a_ ) for scheduler_class in self.scheduler_classes: _snake_case : Union[str, Any] = self.get_scheduler_config() _snake_case : Tuple = scheduler_class(**a_ ) _snake_case : Dict = self.dummy_sample _snake_case : List[str] = 0.1 * sample if num_inference_steps is not None and hasattr(a_, """set_timesteps""" ): scheduler.set_timesteps(a_ ) elif num_inference_steps is not None and not hasattr(a_, """set_timesteps""" ): _snake_case : Dict = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _snake_case : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] _snake_case : List[str] = dummy_past_residuals[:] _snake_case : Optional[int] = scheduler.timesteps[5] _snake_case : Optional[Any] = scheduler.timesteps[6] _snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : List[str] = scheduler.step(a_, a_, a_, **a_ ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) _snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' for timesteps in [100, 1_000]: self.check_over_configs(num_train_timesteps=a_, time_step=a_ ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ): self.check_over_forward(num_inference_steps=a_, time_step=a_ ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Optional[int] = self.full_loop() _snake_case : Optional[int] = torch.mean(torch.abs(a_ ) ) assert abs(result_mean.item() - 2_540_529 ) < 10
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) A_ = { '''configuration_encodec''': [ '''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''EncodecConfig''', ], '''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''', '''EncodecModel''', '''EncodecPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from __future__ import annotations import math def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True A_ = [num for num in range(3, 10_00_01, 2) if not is_prime(num)] def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ): raise ValueError("""n must be an integer""" ) if n <= 0: raise ValueError("""n must be >= 0""" ) _snake_case : Any = [] for num in range(len(snake_case__ ) ): _snake_case : Optional[int] = 0 while 2 * i * i <= odd_composites[num]: _snake_case : Optional[int] = odd_composites[num] - 2 * i * i if is_prime(snake_case__ ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(snake_case__ ) == n: return list_nums return [] def UpperCAmelCase__ (): """simple docstring""" return compute_nums(1 )[0] if __name__ == "__main__": print(F'''{solution() = }''')
"""simple docstring""" A_ = range(2, 20 + 1) A_ = [10**k for k in range(ks[-1] + 1)] A_ = {} def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : List[Any] ): """simple docstring""" _snake_case : Union[str, Any] = sum(a_i[j] for j in range(snake_case__ , len(snake_case__ ) ) ) _snake_case : Optional[int] = sum(a_i[j] * base[j] for j in range(min(len(snake_case__ ) , snake_case__ ) ) ) _snake_case , _snake_case : int = 0, 0 _snake_case : List[str] = n - i _snake_case : List[str] = memo.get(snake_case__ ) if sub_memo is not None: _snake_case : Optional[Any] = sub_memo.get(snake_case__ ) if jumps is not None and len(snake_case__ ) > 0: # find and make the largest jump without going over _snake_case : Optional[int] = -1 for _k in range(len(snake_case__ ) - 1 , -1 , -1 ): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: _snake_case : Any = _k break if max_jump >= 0: _snake_case , _snake_case , _snake_case : Optional[Any] = jumps[max_jump] # since the difference between jumps is cached, add c _snake_case : List[str] = diff + c for j in range(min(snake_case__ , len(snake_case__ ) ) ): _snake_case , _snake_case : Union[str, Any] = divmod(snake_case__ , 10 ) if new_c > 0: add(snake_case__ , snake_case__ , snake_case__ ) else: _snake_case : Any = [] else: _snake_case : Dict = {c: []} _snake_case : List[Any] = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps _snake_case , _snake_case : Optional[int] = next_term(snake_case__ , k - 1 , i + dn , snake_case__ ) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead _snake_case , _snake_case : List[Any] = compute(snake_case__ , snake_case__ , i + dn , snake_case__ ) diff += _diff dn += terms_jumped _snake_case : Dict = sub_memo[c] # keep jumps sorted by # of terms skipped _snake_case : Tuple = 0 while j < len(snake_case__ ): if jumps[j][1] > dn: break j += 1 # cache the jump for this value digitsum(b) and c sub_memo[c].insert(snake_case__ , (diff, dn, k) ) return (diff, dn) def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] ): """simple docstring""" if i >= n: return 0, i if k > len(snake_case__ ): a_i.extend([0 for _ in range(k - len(snake_case__ ) )] ) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) _snake_case : Tuple = i _snake_case , _snake_case , _snake_case : Optional[Any] = 0, 0, 0 for j in range(len(snake_case__ ) ): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 _snake_case : Optional[Any] = ds_c + ds_b diff += addend _snake_case : int = 0 for j in range(snake_case__ ): _snake_case : Optional[int] = a_i[j] + addend _snake_case , _snake_case : str = divmod(snake_case__ , 10 ) ds_c += a_i[j] if addend > 0: break if addend > 0: add(snake_case__ , snake_case__ , snake_case__ ) return diff, i - start_i def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : Any , snake_case__ : List[str] ): """simple docstring""" for j in range(snake_case__ , len(snake_case__ ) ): _snake_case : Any = digits[j] + addend if s >= 10: _snake_case , _snake_case : List[Any] = divmod(snake_case__ , 10 ) _snake_case : List[str] = addend // 10 + quotient else: _snake_case : str = s _snake_case : Dict = addend // 10 if addend == 0: break while addend > 0: _snake_case , _snake_case : Any = divmod(snake_case__ , 10 ) 
digits.append(snake_case__ ) def UpperCAmelCase__ (snake_case__ : int = 10**15 ): """simple docstring""" _snake_case : Any = [1] _snake_case : Tuple = 1 _snake_case : int = 0 while True: _snake_case , _snake_case : str = next_term(snake_case__ , 20 , i + dn , snake_case__ ) dn += terms_jumped if dn == n - i: break _snake_case : Any = 0 for j in range(len(snake_case__ ) ): a_n += digits[j] * 10**j return a_n if __name__ == "__main__": print(F'''{solution() = }''')
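# The jump/memo machinery above accelerates a one-line recurrence (Project
# Euler 551): a(1) = 1 and a(n+1) = a(n) + digitsum(a(n)). A naive reference
# implementation, useful for cross-checking the fast path on small n:
def naive_a(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(digit) for digit in str(a))
    return a


assert [naive_a(k) for k in range(1, 8)] == [1, 2, 4, 8, 16, 23, 28]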
"""simple docstring""" import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class lowercase: '''simple docstring''' def __init__( self: List[Any], a_: str = "cpu", a_: str = "openai/clip-vit-large-patch14" ): '''simple docstring''' _snake_case : Optional[int] = device _snake_case : str = CLIPTokenizerFast.from_pretrained(a_ ) _snake_case : Union[str, Any] = [0.48_145_466, 0.4_578_275, 0.40_821_073] _snake_case : Optional[int] = [0.26_862_954, 0.26_130_258, 0.27_577_711] _snake_case : str = torchvision.transforms.Normalize(self.image_mean, self.image_std ) _snake_case : Optional[int] = torchvision.transforms.Resize(224 ) _snake_case : str = torchvision.transforms.CenterCrop(224 ) def UpperCamelCase_ ( self: List[str], a_: str ): '''simple docstring''' _snake_case : Optional[int] = self.resize(a_ ) _snake_case : List[Any] = self.center_crop(a_ ) _snake_case : Optional[Any] = self.normalize(a_ ) return images def __call__( self: Any, a_: Optional[int]=None, a_: str=None, **a_: str ): '''simple docstring''' _snake_case : Optional[int] = self.tokenizer(text=a_, **a_ ) _snake_case : Any = self.preprocess_img(a_ ) _snake_case : Union[str, Any] = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class lowercase( nn.Module ): '''simple docstring''' def __init__( self: List[Any], a_: List[Any]=10, a_: Optional[Any]=0.01, a_: List[str]=None, a_: str=None, a_: Any=None, a_: Tuple=None, a_: List[str]=None, a_: List[str]=None, a_: str=False, a_: List[str]=True, a_: Any="image", a_: Optional[Any]=True, a_: Dict=False, a_: List[str]=False, a_: Optional[int]=False, ): '''simple docstring''' super().__init__() _snake_case : int = None _snake_case : List[str] = device if device else get_device() if vqgan: _snake_case : Any = vqgan else: _snake_case : Optional[Any] = load_vqgan(self.device, conf_path=a_, ckpt_path=a_ ) self.vqgan.eval() if clip: _snake_case : Tuple = clip else: _snake_case : Optional[Any] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" ) self.clip.to(self.device ) _snake_case : List[str] = ProcessorGradientFlow(device=self.device ) _snake_case : Union[str, Any] = iterations _snake_case : Dict = lr _snake_case : Optional[int] = log _snake_case : List[str] = make_grid _snake_case : Union[str, Any] = return_val _snake_case : List[str] = quantize _snake_case : List[str] = self.vqgan.decoder.z_shape def UpperCamelCase_ ( self: Tuple, a_: str=None, a_: Dict=None, a_: Dict=5, a_: Dict=True ): '''simple docstring''' _snake_case : Dict = [] if output_path is None: _snake_case : Tuple = """./animation.gif""" if input_path is None: _snake_case : Any = self.save_path _snake_case : Optional[int] = sorted(glob(input_path + """/*""" ) ) if not len(a_ ): raise ValueError( """No images found in save path, aborting (did you pass save_intermediate=True to the generate""" """ function?)""" ) if len(a_ ) == 1: print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" ) _snake_case : List[Any] = total_duration / len(a_ ) _snake_case : Optional[Any] = [frame_duration] * len(a_ ) if extend_frames: _snake_case : Optional[int] = 1.5 _snake_case : int = 3 for file_name in paths: if file_name.endswith(""".png""" ): 
images.append(imageio.imread(a_ ) ) imageio.mimsave(a_, a_, duration=a_ ) print(f"gif saved to {output_path}" ) def UpperCamelCase_ ( self: str, a_: Tuple=None, a_: Optional[Any]=None ): '''simple docstring''' if not (path or img): raise ValueError("""Input either path or tensor""" ) if img is not None: raise NotImplementedError _snake_case : int = preprocess(Image.open(a_ ), target_image_size=256 ).to(self.device ) _snake_case : int = preprocess_vqgan(a_ ) _snake_case , *_snake_case : List[Any] = self.vqgan.encode(a_ ) return z def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any] ): '''simple docstring''' _snake_case : Optional[int] = self.latent.detach().requires_grad_() _snake_case : Tuple = base_latent + transform_vector if self.quantize: _snake_case , *_snake_case : Any = self.vqgan.quantize(a_ ) else: _snake_case : List[Any] = trans_latent return self.vqgan.decode(a_ ) def UpperCamelCase_ ( self: List[Any], a_: Any, a_: Union[str, Any], a_: Dict=None ): '''simple docstring''' _snake_case : Tuple = self.clip_preprocessor(text=a_, images=a_, return_tensors="""pt""", padding=a_ ) _snake_case : Any = self.clip(**a_ ) _snake_case : str = clip_outputs.logits_per_image if weights is not None: _snake_case : Any = similarity_logits * weights return similarity_logits.sum() def UpperCamelCase_ ( self: Any, a_: Any, a_: List[str], a_: Dict ): '''simple docstring''' _snake_case : List[Any] = self._get_clip_similarity(pos_prompts["""prompts"""], a_, weights=(1 / pos_prompts["""weights"""]) ) if neg_prompts: _snake_case : List[str] = self._get_clip_similarity(neg_prompts["""prompts"""], a_, weights=neg_prompts["""weights"""] ) else: _snake_case : Tuple = torch.tensor([1], device=self.device ) _snake_case : int = -torch.log(a_ ) + torch.log(a_ ) return loss def UpperCamelCase_ ( self: Optional[Any], a_: Tuple, a_: Union[str, Any], a_: List[str] ): '''simple docstring''' _snake_case : Tuple = torch.randn_like(self.latent, requires_grad=a_, device=self.device ) _snake_case : Dict = torch.optim.Adam([vector], lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() _snake_case : str = self._add_vector(a_ ) _snake_case : List[Any] = loop_post_process(a_ ) _snake_case : List[Any] = self._get_CLIP_loss(a_, a_, a_ ) print("""CLIP loss""", a_ ) if self.log: wandb.log({"""CLIP Loss""": clip_loss} ) clip_loss.backward(retain_graph=a_ ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def UpperCamelCase_ ( self: int, a_: Any, a_: Union[str, Any], a_: Optional[int] ): '''simple docstring''' wandb.init(reinit=a_, project="""face-editor""" ) wandb.config.update({"""Positive Prompts""": positive_prompts} ) wandb.config.update({"""Negative Prompts""": negative_prompts} ) wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} ) if image_path: _snake_case : Any = Image.open(a_ ) _snake_case : str = image.resize((256, 256) ) wandb.log("""Original Image""", wandb.Image(a_ ) ) def UpperCamelCase_ ( self: str, a_: List[Any] ): '''simple docstring''' if not prompts: return [] _snake_case : List[str] = [] _snake_case : Tuple = [] if isinstance(a_, a_ ): _snake_case : Union[str, Any] = [prompt.strip() for prompt in prompts.split("""|""" )] for prompt in prompts: if isinstance(a_, (tuple, list) ): _snake_case : List[Any] = prompt[0] _snake_case : Optional[Any] = float(prompt[1] ) elif ":" in prompt: _snake_case , _snake_case : List[Any] = prompt.split(""":""" ) _snake_case : str = float(a_ ) else: _snake_case : int = prompt 
_snake_case : Union[str, Any] = 1.0 processed_prompts.append(a_ ) weights.append(a_ ) return { "prompts": processed_prompts, "weights": torch.tensor(a_, device=self.device ), } def UpperCamelCase_ ( self: Dict, a_: List[Any], a_: List[Any]=None, a_: Optional[Any]=None, a_: Optional[Any]=True, a_: Dict=False, a_: Optional[Any]=True, a_: Optional[Any]=True, a_: Any=None, ): '''simple docstring''' if image_path: _snake_case : Union[str, Any] = self._get_latent(a_ ) else: _snake_case : Any = torch.randn(self.latent_dim, device=self.device ) if self.log: self._init_logging(a_, a_, a_ ) assert pos_prompts, "You must provide at least one positive prompt." _snake_case : str = self.process_prompts(a_ ) _snake_case : Dict = self.process_prompts(a_ ) if save_final and save_path is None: _snake_case : Any = os.path.join("""./outputs/""", """_""".join(pos_prompts["""prompts"""] ) ) if not os.path.exists(a_ ): os.makedirs(a_ ) else: _snake_case : List[Any] = save_path + """_""" + get_timestamp() os.makedirs(a_ ) _snake_case : Optional[Any] = save_path _snake_case : List[Any] = self.vqgan.decode(self.latent )[0] if show_intermediate: print("""Original Image""" ) show_pil(custom_to_pil(a_ ) ) _snake_case : List[Any] = loop_post_process(a_ ) for iter, transformed_img in enumerate(self._optimize_CLIP(a_, a_, a_ ) ): if show_intermediate: show_pil(a_ ) if save_intermediate: transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png" ) ) if self.log: wandb.log({"""Image""": wandb.Image(a_ )} ) if show_final: show_pil(a_ ) if save_final: transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png" ) )
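# process_prompts above accepts specs such as "a caption:2|another caption",
# i.e. "|"-separated prompts with optional ":weight" suffixes. A standalone
# sketch of that parsing convention (illustrative only: the real method also
# accepts (prompt, weight) tuples and returns torch tensors on self.device):
def parse_prompt_spec(spec: str) -> dict:
    prompts, weights = [], []
    for part in spec.split("|"):
        text, _, weight = part.strip().partition(":")
        prompts.append(text)
        weights.append(float(weight) if weight else 1.0)
    return {"prompts": prompts, "weights": weights}


assert parse_prompt_spec("a smiling face:2|glasses") == {
    "prompts": ["a smiling face", "glasses"],
    "weights": [2.0, 1.0],
}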
"""simple docstring""" from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def UpperCAmelCase__ (): """simple docstring""" import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join _snake_case : Dict = """__test_patch_submodule_mock__""" with patch_submodule(_test_patching , """os.path.join""" , snake_case__ ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def UpperCAmelCase__ (): """simple docstring""" assert _test_patching.open is open _snake_case : Any = """__test_patch_submodule_builtin_mock__""" # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , """open""" , snake_case__ ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def UpperCAmelCase__ (): """simple docstring""" _snake_case : Union[str, Any] = """__test_patch_submodule_missing_mock__""" with patch_submodule(_test_patching , """pandas.read_csv""" , snake_case__ ): pass def UpperCAmelCase__ (): """simple docstring""" _snake_case : Any = """__test_patch_submodule_missing_builtin_mock__""" # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , """len""" , snake_case__ ) is None with patch_submodule(_test_patching , """len""" , snake_case__ ): assert _test_patching.len is mock assert _test_patching.len is len def UpperCAmelCase__ (): """simple docstring""" _snake_case : 
Optional[int] = """__test_patch_submodule_start_and_stop_mock__""" _snake_case : Dict = patch_submodule(_test_patching , """open""" , snake_case__ ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def UpperCAmelCase__ (): """simple docstring""" from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join _snake_case : Any = """__test_patch_submodule_successive_join__""" _snake_case : Union[str, Any] = """__test_patch_submodule_successive_dirname__""" _snake_case : Tuple = """__test_patch_submodule_successive_rename__""" assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , """os.path.join""" , snake_case__ ): with patch_submodule(_test_patching , """os.rename""" , snake_case__ ): with patch_submodule(_test_patching , """os.path.dirname""" , snake_case__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , """os.rename""" , snake_case__ ): with patch_submodule(_test_patching , """os.path.join""" , snake_case__ ): with patch_submodule(_test_patching , """os.path.dirname""" , snake_case__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def UpperCAmelCase__ (): """simple docstring""" _snake_case : str = """__test_patch_submodule_doesnt_exist_mock__""" with patch_submodule(_test_patching , """__module_that_doesn_exist__.__attribute_that_doesn_exist__""" , snake_case__ ): pass with patch_submodule(_test_patching , """os.__attribute_that_doesn_exist__""" , snake_case__ ): pass
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ) or number < 0: raise ValueError("""Input must be a non-negative integer""" ) _snake_case : Dict = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
28
1
"""simple docstring""" import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class lowercase( __a ): '''simple docstring''' lowercase__ = (IPNDMScheduler,) lowercase__ = (("num_inference_steps", 50),) def UpperCamelCase_ ( self: Union[str, Any], **a_: Union[str, Any] ): '''simple docstring''' _snake_case : List[Any] = {"""num_train_timesteps""": 1_000} config.update(**a_ ) return config def UpperCamelCase_ ( self: Tuple, a_: Optional[int]=0, **a_: int ): '''simple docstring''' _snake_case : Optional[int] = dict(self.forward_default_kwargs ) _snake_case : Optional[Any] = kwargs.pop("""num_inference_steps""", a_ ) _snake_case : Optional[Any] = self.dummy_sample _snake_case : Dict = 0.1 * sample _snake_case : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: _snake_case : int = self.get_scheduler_config(**a_ ) _snake_case : Dict = scheduler_class(**a_ ) scheduler.set_timesteps(a_ ) # copy over dummy past residuals _snake_case : int = dummy_past_residuals[:] if time_step is None: _snake_case : Union[str, Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a_ ) _snake_case : Tuple = scheduler_class.from_pretrained(a_ ) new_scheduler.set_timesteps(a_ ) # copy over dummy past residuals _snake_case : Optional[Any] = dummy_past_residuals[:] _snake_case : List[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : str = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _snake_case : Optional[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : Optional[int] = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' pass def UpperCamelCase_ ( self: str, a_: Any=0, **a_: Tuple ): '''simple docstring''' _snake_case : str = dict(self.forward_default_kwargs ) _snake_case : List[Any] = kwargs.pop("""num_inference_steps""", a_ ) _snake_case : Optional[int] = self.dummy_sample _snake_case : Tuple = 0.1 * sample _snake_case : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: _snake_case : Any = self.get_scheduler_config() _snake_case : Tuple = scheduler_class(**a_ ) scheduler.set_timesteps(a_ ) # copy over dummy past residuals (must be after setting timesteps) _snake_case : Union[str, Any] = dummy_past_residuals[:] if time_step is None: _snake_case : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a_ ) _snake_case : List[str] = scheduler_class.from_pretrained(a_ ) # copy over dummy past residuals new_scheduler.set_timesteps(a_ ) # copy over dummy past residual (must be after setting timesteps) _snake_case : List[str] = dummy_past_residuals[:] _snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : Any = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : int = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, 
"Scheduler outputs are not identical" def UpperCamelCase_ ( self: List[Any], **a_: Optional[int] ): '''simple docstring''' _snake_case : Union[str, Any] = self.scheduler_classes[0] _snake_case : Any = self.get_scheduler_config(**a_ ) _snake_case : List[Any] = scheduler_class(**a_ ) _snake_case : Union[str, Any] = 10 _snake_case : Union[str, Any] = self.dummy_model() _snake_case : List[Any] = self.dummy_sample_deter scheduler.set_timesteps(a_ ) for i, t in enumerate(scheduler.timesteps ): _snake_case : Optional[Any] = model(a_, a_ ) _snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample for i, t in enumerate(scheduler.timesteps ): _snake_case : Union[str, Any] = model(a_, a_ ) _snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample return sample def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : str = dict(self.forward_default_kwargs ) _snake_case : int = kwargs.pop("""num_inference_steps""", a_ ) for scheduler_class in self.scheduler_classes: _snake_case : Union[str, Any] = self.get_scheduler_config() _snake_case : Tuple = scheduler_class(**a_ ) _snake_case : Dict = self.dummy_sample _snake_case : List[str] = 0.1 * sample if num_inference_steps is not None and hasattr(a_, """set_timesteps""" ): scheduler.set_timesteps(a_ ) elif num_inference_steps is not None and not hasattr(a_, """set_timesteps""" ): _snake_case : Dict = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _snake_case : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] _snake_case : List[str] = dummy_past_residuals[:] _snake_case : Optional[int] = scheduler.timesteps[5] _snake_case : Optional[Any] = scheduler.timesteps[6] _snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : List[str] = scheduler.step(a_, a_, a_, **a_ ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) _snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' for timesteps in [100, 1_000]: self.check_over_configs(num_train_timesteps=a_, time_step=a_ ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ): self.check_over_forward(num_inference_steps=a_, time_step=a_ ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Optional[int] = self.full_loop() _snake_case : Optional[int] = torch.mean(torch.abs(a_ ) ) assert abs(result_mean.item() - 2_540_529 ) < 10
28
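# Hedged sketch of the save/reload round-trip the scheduler test above
# exercises: an IPNDMScheduler written out with `save_config` should come
# back via `from_pretrained` with identical timesteps. Requires `diffusers`
# and `torch`; the 1_000/50 values mirror the test's config.
import tempfile

import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1_000)
scheduler.set_timesteps(50)
with tempfile.TemporaryDirectory() as tmpdir:
    scheduler.save_config(tmpdir)
    reloaded = IPNDMScheduler.from_pretrained(tmpdir)
reloaded.set_timesteps(50)
assert torch.equal(scheduler.timesteps, reloaded.timesteps)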
"""simple docstring""" import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase: '''simple docstring''' def __init__( self: List[str], a_: List[Any], a_: str=13, a_: Dict=32, a_: Union[str, Any]=3, a_: Union[str, Any]=4, a_: Tuple=[10, 20, 30, 40], a_: Dict=[2, 2, 3, 2], a_: Tuple=True, a_: Optional[Any]=True, a_: Any=37, a_: Any="gelu", a_: int=10, a_: Tuple=0.02, a_: str=["stage2", "stage3", "stage4"], a_: List[str]=[2, 3, 4], a_: List[str]=None, ): '''simple docstring''' _snake_case : int = parent _snake_case : int = batch_size _snake_case : List[Any] = image_size _snake_case : List[str] = num_channels _snake_case : Tuple = num_stages _snake_case : Union[str, Any] = hidden_sizes _snake_case : List[Any] = depths _snake_case : Tuple = is_training _snake_case : List[str] = use_labels _snake_case : Tuple = intermediate_size _snake_case : List[str] = hidden_act _snake_case : Optional[Any] = num_labels _snake_case : Tuple = initializer_range _snake_case : Tuple = out_features _snake_case : Tuple = out_indices _snake_case : Dict = scope def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : Any = None if self.use_labels: _snake_case : Dict = ids_tensor([self.batch_size], self.num_labels ) _snake_case : Optional[Any] = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return ConvNextVaConfig( num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=a_, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, ) def UpperCamelCase_ ( self: int, a_: Tuple, a_: Any, a_: Dict ): '''simple docstring''' _snake_case : int = ConvNextVaModel(config=a_ ) model.to(a_ ) model.eval() _snake_case : Any = model(a_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def UpperCamelCase_ ( self: Optional[int], a_: List[str], a_: Tuple, a_: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[int] = ConvNextVaForImageClassification(a_ ) model.to(a_ ) model.eval() _snake_case : Optional[int] = model(a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self: Union[str, Any], a_: Tuple, a_: Tuple, a_: Tuple ): '''simple docstring''' _snake_case : List[str] = 
ConvNextVaBackbone(config=a_ ) model.to(a_ ) model.eval() _snake_case : int = model(a_ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ), len(config.out_features ) ) self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] ) # verify backbone works with out_features=None _snake_case : Tuple = None _snake_case : Tuple = ConvNextVaBackbone(config=a_ ) model.to(a_ ) model.eval() _snake_case : List[Any] = model(a_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ), 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ), 1 ) self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Dict = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case : Any = config_and_inputs _snake_case : str = {"""pixel_values""": pixel_values} return config, inputs_dict def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[Any] = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case : List[str] = config_and_inputs _snake_case : Any = {"""pixel_values""": pixel_values, """labels""": labels} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) lowercase__ = ( {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification} if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Tuple = ConvNextVaModelTester(self ) _snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return @unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" ) def UpperCamelCase_ ( self: int ): '''simple docstring''' pass @unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass @unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" ) def UpperCamelCase_ ( self: str ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: _snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_with_labels() _snake_case : List[Any] = True if model_class.__name__ in [ *get_values(a_ ), *get_values(a_ ), ]: continue _snake_case : Tuple = 
model_class(a_ ) model.to(a_ ) model.train() _snake_case : Optional[Any] = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : Any = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: _snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_with_labels() _snake_case : Any = False _snake_case : List[Any] = True if ( model_class.__name__ in [*get_values(a_ ), *get_values(a_ )] or not model_class.supports_gradient_checkpointing ): continue _snake_case : Dict = model_class(a_ ) model.to(a_ ) model.gradient_checkpointing_enable() model.train() _snake_case : str = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : Optional[int] = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : List[str] = model_class(a_ ) _snake_case : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : int = [*signature.parameters.keys()] _snake_case : Union[str, Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1], a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' def check_hidden_states_output(a_: str, a_: Tuple, a_: Tuple ): _snake_case : Optional[Any] = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): _snake_case : Any = model(**self._prepare_for_class(a_, a_ ) ) _snake_case : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case : Optional[int] = self.model_tester.num_stages self.assertEqual(len(a_ ), expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) _snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Optional[Any] = True check_hidden_states_output(a_, a_, a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case : List[str] = True check_hidden_states_output(a_, a_, a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : str = ConvNextVaModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowercase( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None @slow def 
UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(a_ ) _snake_case : Union[str, Any] = self.default_image_processor _snake_case : List[Any] = prepare_img() _snake_case : Optional[int] = preprocessor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Optional[int] = model(**a_ ) # verify the logits _snake_case : Optional[int] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : Optional[int] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3], a_, atol=1E-4 ) )
28
1
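# A hedged inference sketch matching the slow integration test above:
# classify one image with the public facebook/convnextv2-tiny-1k-224
# checkpoint. The image path is the COCO fixture referenced in the test;
# substitute any local RGB image.
import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000)
print(model.config.id2label[logits.argmax(-1).item()])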
"""simple docstring""" import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class lowercase( unittest.TestCase ): '''simple docstring''' def __init__( self: Union[str, Any], a_: int, a_: Optional[int]=7, a_: Any=3, a_: Any=18, a_: Union[str, Any]=30, a_: Union[str, Any]=400, a_: Any=True, a_: Tuple=None, a_: Tuple=True, ): '''simple docstring''' _snake_case : Tuple = size if size is not None else {"""height""": 18, """width""": 18} _snake_case : Dict = parent _snake_case : int = batch_size _snake_case : Tuple = num_channels _snake_case : Optional[int] = image_size _snake_case : Dict = min_resolution _snake_case : Optional[int] = max_resolution _snake_case : Any = do_resize _snake_case : Optional[int] = size _snake_case : Tuple = do_normalize def UpperCamelCase_ ( self: Dict ): '''simple docstring''' return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804], [-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ImageGPTImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : str = ImageGPTImageProcessingTester(self ) @property def UpperCamelCase_ ( self: Dict ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_, """clusters""" ) ) self.assertTrue(hasattr(a_, """do_resize""" ) ) self.assertTrue(hasattr(a_, """size""" ) ) self.assertTrue(hasattr(a_, """do_normalize""" ) ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : str = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {"""height""": 18, """width""": 18} ) _snake_case : Tuple = self.image_processing_class.from_dict(self.image_processor_dict, size=42 ) self.assertEqual(image_processor.size, {"""height""": 42, """width""": 42} ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Dict = self.image_processing_class(**self.image_processor_dict ) _snake_case : str = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(a_, obj[key] ) ) else: self.assertEqual(obj[key], a_ ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _snake_case : List[str] = os.path.join(a_, """image_processor.json""" ) image_processor_first.to_json_file(a_ ) _snake_case : Tuple = self.image_processing_class.from_json_file(a_ ).to_dict() _snake_case : List[str] = image_processor_first.to_dict() for key, value in 
image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(a_, image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key], a_ ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Any = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(a_ ) _snake_case : Any = self.image_processing_class.from_pretrained(a_ ).to_dict() _snake_case : Optional[Any] = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(a_, image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key], a_ ) @unittest.skip("""ImageGPT requires clusters at initialization""" ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' pass def UpperCAmelCase__ (): """simple docstring""" _snake_case : Optional[int] = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" ) _snake_case : Dict = Image.open(dataset[4]["""file"""] ) _snake_case : Union[str, Any] = Image.open(dataset[5]["""file"""] ) _snake_case : Optional[Any] = [imagea, imagea] return images @require_vision @require_torch class lowercase( unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : str = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" ) _snake_case : List[str] = prepare_images() # test non-batched _snake_case : Tuple = image_processing(images[0], return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids, torch.LongTensor ) self.assertEqual(encoding.input_ids.shape, (1, 1_024) ) _snake_case : Any = [306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist(), a_ ) # test batched _snake_case : Union[str, Any] = image_processing(a_, return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids, torch.LongTensor ) self.assertEqual(encoding.input_ids.shape, (2, 1_024) ) _snake_case : Tuple = [303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist(), a_ )
28
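# Hedged sketch of what the integration test above asserts: the ImageGPT
# processor color-quantizes pixels into cluster ids, yielding LongTensor
# `input_ids` of length 32 * 32 = 1_024 per image for the
# openai/imagegpt-small checkpoint.
import torch
from PIL import Image
from transformers import ImageGPTImageProcessor

processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
image = Image.new("RGB", (64, 64), color=(120, 60, 30))  # any RGB image works
encoding = processor(images=image, return_tensors="pt")
assert encoding.input_ids.dtype == torch.long
assert encoding.input_ids.shape == (1, 1_024)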
"""simple docstring""" import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Dict ): """simple docstring""" assert isinstance(snake_case__ , snake_case__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict ): """simple docstring""" _snake_case : str = tmp_path / """cache""" _snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _snake_case : str = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : int , snake_case__ : List[Any] ): """simple docstring""" _snake_case : str = tmp_path / """cache""" _snake_case : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : List[Any] = features.copy() if features else default_expected_features _snake_case : List[Any] = ( Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None ) _snake_case : Optional[Any] = ParquetDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : int ): """simple docstring""" _snake_case : List[str] = tmp_path / """cache""" _snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : int = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : str , snake_case__ : str ): """simple docstring""" if issubclass(snake_case__ , snake_case__ ): _snake_case : Optional[Any] = parquet_path elif issubclass(snake_case__ , snake_case__ ): _snake_case : int = [parquet_path] _snake_case : Union[str, Any] = tmp_path / """cache""" _snake_case : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : List[str] = 
ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : str=("train",) ): """simple docstring""" assert isinstance(snake_case__ , snake_case__ ) for split in splits: _snake_case : int = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : List[Any] ): """simple docstring""" _snake_case : Tuple = tmp_path / """cache""" _snake_case : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _snake_case : Tuple = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] ): """simple docstring""" _snake_case : Optional[int] = tmp_path / """cache""" _snake_case : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : Optional[Any] = features.copy() if features else default_expected_features _snake_case : Dict = ( Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None ) _snake_case : Optional[int] = ParquetDatasetReader({"""train""": parquet_path} , features=snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Tuple ): """simple docstring""" if split: _snake_case : int = {split: parquet_path} else: _snake_case : Optional[Any] = """train""" _snake_case : int = {"""train""": parquet_path, """test""": parquet_path} _snake_case : Dict = tmp_path / """cache""" _snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : Union[str, Any] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Tuple ): """simple docstring""" _snake_case : List[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _snake_case : str = pq.ParquetFile(tmp_path / """foo.parquet""" ) _snake_case : int = pf.read() assert dataset.data.table == output_table def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ): """simple docstring""" 
_snake_case : Optional[Any] = str(shared_datadir / """test_image_rgb.jpg""" ) _snake_case : Tuple = {"""image""": [image_path]} _snake_case : Optional[int] = Features({"""image""": Image()} ) _snake_case : int = Dataset.from_dict(snake_case__ , features=snake_case__ ) _snake_case : Optional[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _snake_case : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features _snake_case : Optional[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=snake_case__ ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str ): """simple docstring""" assert get_writer_batch_size(snake_case__ ) == expected
28
1
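# A minimal round-trip sketch for the Parquet reader/writer pair tested
# above, using the public `datasets` API (`Dataset.to_parquet` /
# `Dataset.from_parquet`) rather than the internal classes.
import os
import tempfile

from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [0, 1], "col_3": [0.0, 1.0]})
with tempfile.TemporaryDirectory() as tmpdir:
    path = os.path.join(tmpdir, "data.parquet")
    ds.to_parquet(path)
    reloaded = Dataset.from_parquet(path)
    assert reloaded.column_names == ["col_1", "col_2", "col_3"]
    assert reloaded.num_rows == 2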
"""simple docstring""" from typing import Dict from .base import GenericTensor, Pipeline class lowercase( __a ): '''simple docstring''' def UpperCamelCase_ ( self: int, a_: Any=None, a_: Optional[int]=None, a_: Dict=None, **a_: Dict ): '''simple docstring''' if tokenize_kwargs is None: _snake_case : Union[str, Any] = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( """truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)""" ) _snake_case : List[str] = truncation _snake_case : Dict = tokenize_kwargs _snake_case : Any = {} if return_tensors is not None: _snake_case : Any = return_tensors return preprocess_params, {}, postprocess_params def UpperCamelCase_ ( self: Dict, a_: Any, **a_: Optional[Any] ): '''simple docstring''' _snake_case : Tuple = self.framework _snake_case : Optional[Any] = self.tokenizer(a_, return_tensors=a_, **a_ ) return model_inputs def UpperCamelCase_ ( self: Optional[int], a_: str ): '''simple docstring''' _snake_case : Optional[Any] = self.model(**a_ ) return model_outputs def UpperCamelCase_ ( self: List[Any], a_: Optional[int], a_: str=False ): '''simple docstring''' if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self: Tuple, *a_: Union[str, Any], **a_: Any ): '''simple docstring''' return super().__call__(*a_, **a_ )
28
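# Hedged usage sketch for the pipeline class above via the high-level
# `pipeline` factory; the model name is an arbitrary small checkpoint.
# With `return_tensors` unset, the output is nested Python lists shaped
# [batch, sequence_length, hidden_size].
from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("This is a test.")
print(len(features), len(features[0]), len(features[0][0]))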
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase: '''simple docstring''' def __init__( self: Dict, a_: Union[str, Any], a_: Tuple=13, a_: Dict=32, a_: Optional[Any]=3, a_: Optional[Any]=4, a_: Optional[int]=[10, 20, 30, 40], a_: Any=[2, 2, 3, 2], a_: Dict=True, a_: Dict=True, a_: List[str]=37, a_: Dict="gelu", a_: List[str]=10, a_: Union[str, Any]=0.02, a_: Any=["stage2", "stage3", "stage4"], a_: Optional[int]=3, a_: Tuple=None, ): '''simple docstring''' _snake_case : Dict = parent _snake_case : Dict = batch_size _snake_case : Optional[Any] = image_size _snake_case : int = num_channels _snake_case : Tuple = num_stages _snake_case : int = hidden_sizes _snake_case : List[str] = depths _snake_case : str = is_training _snake_case : Dict = use_labels _snake_case : List[str] = intermediate_size _snake_case : Optional[int] = hidden_act _snake_case : Any = type_sequence_label_size _snake_case : List[str] = initializer_range _snake_case : Union[str, Any] = out_features _snake_case : Dict = num_labels _snake_case : int = scope _snake_case : Dict = num_stages def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : Optional[int] = None if self.use_labels: _snake_case : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size ) _snake_case : Tuple = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return ConvNextConfig( num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return UperNetConfig( backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=a_, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=a_, loss_ignore_index=255, num_labels=self.num_labels, ) def UpperCamelCase_ ( self: Tuple, a_: List[Any], a_: Dict, a_: Tuple ): '''simple docstring''' _snake_case : List[Any] = UperNetForSemanticSegmentation(config=a_ ) model.to(a_ ) model.eval() _snake_case : Tuple = model(a_ ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Any = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : List[Any] = config_and_inputs _snake_case : Any = 
{"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = (UperNetForSemanticSegmentation,) if is_torch_available() else () lowercase__ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {} lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : List[str] = UperNetModelTester(self ) _snake_case : Dict = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Dict = model_class(a_ ) _snake_case : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : Tuple = [*signature.parameters.keys()] _snake_case : Any = ["""pixel_values"""] self.assertListEqual(arg_names[:1], a_ ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*a_ ) @unittest.skip(reason="""UperNet does not use inputs_embeds""" ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not support input and output embeddings""" ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not have a base model""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not have a base model""" ) def UpperCamelCase_ ( self: int ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' pass def UpperCamelCase_ ( self: str ): '''simple docstring''' def check_hidden_states_output(a_: Dict, a_: List[str], a_: Optional[int] ): _snake_case : Optional[Any] = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): _snake_case : Any = model(**self._prepare_for_class(a_, a_ ) ) _snake_case : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case : List[str] = self.model_tester.num_stages self.assertEqual(len(a_ ), expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size 
// 4], ) _snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : int = True check_hidden_states_output(a_, a_, a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case : Optional[int] = True check_hidden_states_output(a_, a_, a_ ) def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case , _snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Tuple = _config_zero_init(a_ ) _snake_case : Dict = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: _snake_case : Optional[int] = model_class(config=a_ ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="""UperNet does not have tied weights""" ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' pass @slow def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : int = UperNetForSemanticSegmentation.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Union[str, Any] = hf_hub_download( repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" ) _snake_case : List[Any] = Image.open(snake_case__ ).convert("""RGB""" ) return image @require_torch @require_vision @slow class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : str = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" ) _snake_case : Any = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(a_ ) _snake_case : Dict = prepare_img() _snake_case : str = processor(images=a_, return_tensors="""pt""" ).to(a_ ) with torch.no_grad(): _snake_case : Tuple = model(**a_ ) _snake_case : Tuple = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : int = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" ) _snake_case : Optional[int] = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(a_ ) _snake_case : List[str] = prepare_img() _snake_case : Tuple = processor(images=a_, return_tensors="""pt""" ).to(a_ ) with torch.no_grad(): _snake_case : Optional[Any] = model(**a_ ) _snake_case : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : Optional[Any] = torch.tensor( [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
28
1
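# Hedged inference sketch mirroring the slow tests above: semantic
# segmentation with the public openmmlab/upernet-convnext-tiny checkpoint.
# Replace the filename with any local RGB image.
import torch
from PIL import Image
from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
image = Image.open("ADE_val_00000001.jpg").convert("RGB")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (1, num_labels, 512, 512)
segmentation_map = logits.argmax(dim=1)[0]  # per-pixel class ids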
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available A_ = { '''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = ['''BloomTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BloomForCausalLM''', '''BloomModel''', '''BloomPreTrainedModel''', '''BloomForSequenceClassification''', '''BloomForTokenClassification''', '''BloomForQuestionAnswering''', ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
28
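# A minimal sketch (not transformers' actual `_LazyModule`) of the lazy
# import pattern the module above relies on: PEP 562 lets a module define
# `__getattr__`, so the heavy submodule import is deferred until an
# attribute is first accessed. The import structure below is a stand-in.
import importlib

_import_structure = {"json": ["dumps", "loads"]}  # hypothetical mapping

def __getattr__(name):
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")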
"""simple docstring""" from __future__ import annotations import string from itertools import cycle, product from pathlib import Path A_ = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) A_ = [ord(letter) for letter in string.ascii_lowercase] A_ = {ord(char) for char in VALID_CHARS} A_ = ["the", "be", "to", "of", "and", "in", "that", "have"] def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : tuple[int, ...] ): """simple docstring""" _snake_case : str = "" _snake_case : int _snake_case : int _snake_case : int for keychar, cipherchar in zip(cycle(snake_case__ ) , snake_case__ ): _snake_case : List[str] = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(snake_case__ ) return decoded def UpperCAmelCase__ (snake_case__ : list[int] ): """simple docstring""" _snake_case : list[str] = [] for key in product(snake_case__ , repeat=3 ): _snake_case : List[Any] = try_key(snake_case__ , snake_case__ ) if encoded is not None: possibles.append(snake_case__ ) return possibles def UpperCAmelCase__ (snake_case__ : list[str] , snake_case__ : str ): """simple docstring""" return [possible for possible in possibles if common_word in possible.lower()] def UpperCAmelCase__ (snake_case__ : str = "p059_cipher.txt" ): """simple docstring""" _snake_case : list[int] _snake_case : list[str] _snake_case : str _snake_case : str _snake_case : str = Path(snake_case__ ).parent.joinpath(snake_case__ ).read_text(encoding="""utf-8""" ) _snake_case : List[Any] = [int(snake_case__ ) for number in data.strip().split(""",""" )] _snake_case : Optional[Any] = filter_valid_chars(snake_case__ ) for common_word in COMMON_WORDS: _snake_case : Union[str, Any] = filter_common_word(snake_case__ , snake_case__ ) if len(snake_case__ ) == 1: break _snake_case : Optional[int] = possibles[0] return sum(ord(snake_case__ ) for char in decoded_text ) if __name__ == "__main__": print(F'''{solution() = }''')
28
1
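# A small worked example of the property the cipher solver above depends
# on: XOR with a repeating key is its own inverse, since (c ^ k) ^ k == c.
from itertools import cycle

def xor_with_key(data: bytes, key: bytes) -> bytes:
    return bytes(b ^ k for b, k in zip(data, cycle(key)))

plaintext = b"the quick brown fox"
ciphertext = xor_with_key(plaintext, b"god")
assert xor_with_key(ciphertext, b"god") == plaintext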
"""simple docstring""" import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params A_ = getLogger(__name__) A_ = '''cuda''' if torch.cuda.is_available() else '''cpu''' def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : str , snake_case__ : str , snake_case__ : int = 8 , snake_case__ : str = DEFAULT_DEVICE , snake_case__ : Optional[Any]=False , snake_case__ : List[Any]="summarization" , snake_case__ : Optional[int]=None , **snake_case__ : List[Any] , ): """simple docstring""" _snake_case : str = Path(snake_case__ ).open("""w""" , encoding="""utf-8""" ) _snake_case : Optional[int] = str(snake_case__ ) _snake_case : str = AutoModelForSeqaSeqLM.from_pretrained(snake_case__ ).to(snake_case__ ) if fpaa: _snake_case : str = model.half() _snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(snake_case__ ) logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type. _snake_case : int = time.time() # update config with task specific params use_task_specific_params(snake_case__ , snake_case__ ) if prefix is None: _snake_case : Dict = prefix or getattr(model.config , """prefix""" , """""" ) or """""" for examples_chunk in tqdm(list(chunks(snake_case__ , snake_case__ ) ) ): _snake_case : Optional[int] = [prefix + text for text in examples_chunk] _snake_case : Optional[int] = tokenizer(snake_case__ , return_tensors="""pt""" , truncation=snake_case__ , padding="""longest""" ).to(snake_case__ ) _snake_case : Optional[int] = model.generate( input_ids=batch.input_ids , attention_mask=batch.attention_mask , **snake_case__ , ) _snake_case : Union[str, Any] = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ ) for hypothesis in dec: fout.write(hypothesis + """\n""" ) fout.flush() fout.close() _snake_case : Any = int(time.time() - start_time ) # seconds _snake_case : Union[str, Any] = len(snake_case__ ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )} def UpperCAmelCase__ (): """simple docstring""" return datetime.datetime.now().strftime("""%Y-%m-%d %H:%M:%S""" ) def UpperCAmelCase__ (snake_case__ : str=True ): """simple docstring""" _snake_case : int = argparse.ArgumentParser() parser.add_argument("""model_name""" , type=snake_case__ , help="""like facebook/bart-large-cnn,t5-base, etc.""" ) parser.add_argument("""input_path""" , type=snake_case__ , help="""like cnn_dm/test.source""" ) parser.add_argument("""save_path""" , type=snake_case__ , help="""where to save summaries""" ) parser.add_argument("""--reference_path""" , type=snake_case__ , required=snake_case__ , help="""like cnn_dm/test.target""" ) parser.add_argument("""--score_path""" , type=snake_case__ , required=snake_case__ , default="""metrics.json""" , help="""where to save metrics""" ) parser.add_argument("""--device""" , type=snake_case__ , required=snake_case__ , default=snake_case__ , help="""cuda, cuda:1, cpu etc.""" ) parser.add_argument( """--prefix""" , type=snake_case__ , required=snake_case__ , default=snake_case__ , help="""will be added to the begininng of src examples""" ) parser.add_argument("""--task""" , type=snake_case__ , default="""summarization""" , 
help="""used for task_specific_params + metrics""" ) parser.add_argument("""--bs""" , type=snake_case__ , default=8 , required=snake_case__ , help="""batch size""" ) parser.add_argument( """--n_obs""" , type=snake_case__ , default=-1 , required=snake_case__ , help="""How many observations. Defaults to all.""" ) parser.add_argument("""--fp16""" , action="""store_true""" ) parser.add_argument("""--dump-args""" , action="""store_true""" , help="""print the custom hparams with the results""" ) parser.add_argument( """--info""" , nargs="""?""" , type=snake_case__ , const=datetime_now() , help=( """use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g.""" """ lang=en-ru. If no value is passed, the current datetime string will be used.""" ) , ) # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate _snake_case , _snake_case : List[Any] = parser.parse_known_args() _snake_case : List[str] = parse_numeric_n_bool_cl_kwargs(snake_case__ ) if parsed_args and verbose: print(F"parsed the following generate kwargs: {parsed_args}" ) _snake_case : Optional[int] = [""" """ + x.rstrip() if """t5""" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()] if args.n_obs > 0: _snake_case : str = examples[: args.n_obs] Path(args.save_path ).parent.mkdir(exist_ok=snake_case__ ) if args.reference_path is None and Path(args.score_path ).exists(): warnings.warn(F"score_path {args.score_path} will be overwritten unless you type ctrl-c." ) if args.device == "cpu" and args.fpaa: # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half' raise ValueError("""Can't mix --fp16 and --device cpu""" ) _snake_case : Any = generate_summaries_or_translations( snake_case__ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **snake_case__ , ) if args.reference_path is None: return {} # Compute scores _snake_case : Union[str, Any] = calculate_bleu if """translation""" in args.task else calculate_rouge _snake_case : str = [x.rstrip() for x in open(args.save_path ).readlines()] _snake_case : Dict = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(snake_case__ )] _snake_case : dict = score_fn(snake_case__ , snake_case__ ) scores.update(snake_case__ ) if args.dump_args: scores.update(snake_case__ ) if args.info: _snake_case : Union[str, Any] = args.info if verbose: print(snake_case__ ) if args.score_path is not None: json.dump(snake_case__ , open(args.score_path , """w""" ) ) return scores if __name__ == "__main__": # Usage for MT: # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ run_generate(verbose=True)
28
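# Hedged sketch of the batching helper the evaluation script above imports
# from its local `utils` module; this stand-in shows the expected behaviour
# of `chunks`: split a list into fixed-size batches for generation.
def chunks(items, batch_size):
    for i in range(0, len(items), batch_size):
        yield items[i : i + batch_size]

assert list(chunks(["a", "b", "c", "d", "e"], 2)) == [["a", "b"], ["c", "d"], ["e"]]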
"""simple docstring""" from ...processing_utils import ProcessorMixin class lowercase( __a ): '''simple docstring''' lowercase__ = ["image_processor", "feature_extractor"] lowercase__ = "TvltImageProcessor" lowercase__ = "TvltFeatureExtractor" def __init__( self: Dict, a_: Union[str, Any], a_: Union[str, Any] ): '''simple docstring''' super().__init__(image_processor=a_, feature_extractor=a_ ) _snake_case : Any = image_processor _snake_case : Dict = feature_extractor def __call__( self: int, a_: str=None, a_: Tuple=None, a_: Dict=None, a_: str=None, a_: Optional[int]=False, a_: Tuple=False, *a_: List[str], **a_: int, ): '''simple docstring''' if images is None and audio is None: raise ValueError("""You need to specify either an `images` or `audio` input to process.""" ) _snake_case : Optional[int] = None if images is not None: _snake_case : Tuple = self.image_processor(a_, mask_pixel=a_, *a_, **a_ ) if images_mixed is not None: _snake_case : Optional[int] = self.image_processor(a_, is_mixed=a_, *a_, **a_ ) if audio is not None: _snake_case : Any = self.feature_extractor( a_, *a_, sampling_rate=a_, mask_audio=a_, **a_ ) _snake_case : List[str] = {} if audio is not None: output_dict.update(a_ ) if images is not None: output_dict.update(a_ ) if images_mixed_dict is not None: output_dict.update(a_ ) return output_dict @property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Dict = self.image_processor.model_input_names _snake_case : List[str] = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
28
1
"""simple docstring""" from .imports import is_rich_available if is_rich_available(): from rich.traceback import install install(show_locals=False) else: raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
28
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): A_ = '''pt''' elif is_tf_available(): A_ = '''tf''' else: A_ = '''jax''' class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ByTaTokenizer lowercase__ = False def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' super().setUp() _snake_case : List[str] = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' return ByTaTokenizer.from_pretrained("""google/byt5-small""" ) def UpperCamelCase_ ( self: List[Any], **a_: int ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname, **a_ ) def UpperCamelCase_ ( self: Optional[Any], a_: Optional[Any], a_: List[Any]=False, a_: int=20, a_: Union[str, Any]=5 ): '''simple docstring''' _snake_case : List[Any] = [] for i in range(len(a_ ) ): try: _snake_case : Optional[Any] = tokenizer.decode([i], clean_up_tokenization_spaces=a_ ) except UnicodeDecodeError: pass toks.append((i, tok) ) _snake_case : str = list(filter(lambda a_ : re.match(r"""^[ a-zA-Z]+$""", t[1] ), a_ ) ) _snake_case : List[Any] = list(filter(lambda a_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=a_ ), a_ ) ) if max_length is not None and len(a_ ) > max_length: _snake_case : Tuple = toks[:max_length] if min_length is not None and len(a_ ) < min_length and len(a_ ) > 0: while len(a_ ) < min_length: _snake_case : List[str] = toks + toks # toks_str = [t[1] for t in toks] _snake_case : Tuple = [t[0] for t in toks] # Ensure consistency _snake_case : Optional[Any] = tokenizer.decode(a_, clean_up_tokenization_spaces=a_ ) if " " not in output_txt and len(a_ ) > 1: _snake_case : Dict = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=a_ ) + """ """ + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=a_ ) ) if with_prefix_space: _snake_case : Union[str, Any] = """ """ + output_txt _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ ) return output_txt, output_ids def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[int] = self.ta_base_tokenizer _snake_case : Optional[Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] ) _snake_case : int = tokenizer(["""hi""", """I went to the gym""", """"""] ) self.assertListEqual(batch_with_eos_added["""input_ids"""], batch_without_eos_added["""input_ids"""] ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[str] = self.ta_base_tokenizer _snake_case : Tuple = """Unicode €.""" _snake_case : List[Any] = tokenizer(a_ ) _snake_case : Tuple = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded["""input_ids"""], a_ ) # decoding _snake_case : Tuple = tokenizer.decode(a_ ) self.assertEqual(a_, """Unicode €.</s>""" ) _snake_case : Tuple = tokenizer("""e è é ê ë""" ) _snake_case : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded["""input_ids"""], a_ ) # decoding _snake_case : int = tokenizer.decode(a_ ) self.assertEqual(a_, """e è é ê ë</s>""" ) # encode/decode, but with `encode` instead of `__call__` 
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ), """e è é ê ë</s>""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Dict = self.ta_base_tokenizer _snake_case : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] # fmt: off _snake_case : Union[str, Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on _snake_case : int = tokenizer(a_, padding=a_, return_tensors=a_ ) self.assertIsInstance(a_, a_ ) if FRAMEWORK != "jax": _snake_case : List[str] = list(batch.input_ids.numpy()[0] ) else: _snake_case : Optional[int] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(a_, a_ ) self.assertEqual((2, 37), batch.input_ids.shape ) self.assertEqual((2, 37), batch.attention_mask.shape ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : List[Any] = self.ta_base_tokenizer _snake_case : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] _snake_case : Tuple = tokenizer(a_, padding=a_, return_tensors=a_ ) # check if input_ids are returned and no decoder_input_ids self.assertIn("""input_ids""", a_ ) self.assertIn("""attention_mask""", a_ ) self.assertNotIn("""decoder_input_ids""", a_ ) self.assertNotIn("""decoder_attention_mask""", a_ ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Union[str, Any] = self.ta_base_tokenizer _snake_case : Dict = [ """Summary of the text.""", """Another summary.""", ] _snake_case : Optional[int] = tokenizer( text_target=a_, max_length=32, padding="""max_length""", truncation=a_, return_tensors=a_ ) self.assertEqual(32, targets["""input_ids"""].shape[1] ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : int = self.ta_base_tokenizer _snake_case : Optional[int] = ["""A long paragraph for summarization. </s>"""] _snake_case : Dict = ["""Summary of the text. 
</s>"""] # fmt: off _snake_case : Optional[int] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] _snake_case : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on _snake_case : Optional[Any] = tokenizer(a_, text_target=a_ ) self.assertEqual(a_, batch["""input_ids"""][0] ) self.assertEqual(a_, batch["""labels"""][0] ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): self.assertNotEqual(tokenizer.model_max_length, 42 ) # Now let's start the test _snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc _snake_case : List[str] = tempfile.mkdtemp() _snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running""" _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ ) tokenizer.save_pretrained(a_ ) _snake_case : List[Any] = tokenizer.__class__.from_pretrained(a_ ) _snake_case : Dict = after_tokenizer.encode(a_, add_special_tokens=a_ ) self.assertListEqual(a_, a_ ) shutil.rmtree(a_ ) _snake_case : Tuple = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc _snake_case : Union[str, Any] = tempfile.mkdtemp() _snake_case : List[Any] = """ He is very happy, UNwant\u00E9d,running""" tokenizer.add_tokens(["""bim""", """bambam"""] ) _snake_case : Optional[Any] = tokenizer.additional_special_tokens additional_special_tokens.append("""new_additional_special_token""" ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ ) tokenizer.save_pretrained(a_ ) _snake_case : Optional[Any] = tokenizer.__class__.from_pretrained(a_ ) _snake_case : str = after_tokenizer.encode(a_, add_special_tokens=a_ ) self.assertListEqual(a_, a_ ) self.assertIn("""new_additional_special_token""", after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length, 42 ) _snake_case : Optional[int] = tokenizer.__class__.from_pretrained(a_, model_max_length=43 ) self.assertEqual(tokenizer.model_max_length, 43 ) shutil.rmtree(a_ ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Optional[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(a_ ) with open(os.path.join(a_, """special_tokens_map.json""" ), encoding="""utf-8""" ) as json_file: _snake_case : Union[str, Any] = json.load(a_ ) with open(os.path.join(a_, """tokenizer_config.json""" ), encoding="""utf-8""" ) as json_file: _snake_case : List[Any] = json.load(a_ ) _snake_case : int = [f"<extra_id_{i}>" for i in range(125 )] _snake_case : Optional[int] = added_tokens_extra_ids + [ """an_additional_special_token""" ] _snake_case : Dict = added_tokens_extra_ids + [ 
"""an_additional_special_token""" ] with open(os.path.join(a_, """special_tokens_map.json""" ), """w""", encoding="""utf-8""" ) as outfile: json.dump(a_, a_ ) with open(os.path.join(a_, """tokenizer_config.json""" ), """w""", encoding="""utf-8""" ) as outfile: json.dump(a_, a_ ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _snake_case : Optional[int] = tokenizer_class.from_pretrained( a_, ) self.assertIn( """an_additional_special_token""", tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ["""an_additional_special_token"""], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _snake_case : Union[str, Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""", lstrip=a_ )] _snake_case : List[Any] = tokenizer_class.from_pretrained( a_, additional_special_tokens=a_, ) self.assertIn("""a_new_additional_special_token""", tokenizer.additional_special_tokens ) self.assertEqual( ["""a_new_additional_special_token"""], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ), ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : List[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(a_ ) _snake_case : Optional[Any] = tokenizer_class.from_pretrained(a_ ) self.assertTrue(tokenizer.decode([255] ) == """""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' pass def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[Any] = self.get_tokenizers(fast=a_, do_lower_case=a_ ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): _snake_case : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""] _snake_case : List[Any] = tokenizer.convert_tokens_to_string(a_ ) self.assertIsInstance(a_, a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): _snake_case : Optional[int] = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] _snake_case : Any = 0 _snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens( a_, skip_special_tokens=a_ ) for attr in attributes_list: setattr(a_, attr + """_id""", a_ ) self.assertEqual(getattr(a_, a_ ), a_ ) self.assertEqual(getattr(a_, attr + """_id""" ), a_ ) setattr(a_, attr + """_id""", a_ ) 
self.assertEqual(getattr(a_, a_ ), a_ ) self.assertEqual(getattr(a_, attr + """_id""" ), a_ ) setattr(a_, """additional_special_tokens_ids""", [] ) self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [] ) self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [] ) setattr(a_, """additional_special_tokens_ids""", [token_id_to_test_setters] ) self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [token_to_test_setters] ) self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [token_id_to_test_setters] )
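# Hedged sketch, inferred from the expected ids in the ByT5 tests above: each
# UTF-8 byte appears to map to id (byte + 3), leaving room for the special
# tokens pad=0, eos=1, unk=2. The offset of 3 is an inference from the test
# data, not read from the ByT5 implementation itself.
def byte_level_ids(text: str, offset: int = 3) -> list[int]:
    """Map a string to one id per UTF-8 byte, shifted by ``offset``."""
    return [b + offset for b in text.encode("utf-8")]


# Matches the expected input_ids in the test above (minus the trailing eos id 1).
assert byte_level_ids("Unicode €.") == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49]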
"""simple docstring""" import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class lowercase( __a ): '''simple docstring''' def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Optional[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(a_, """hidden_sizes""" ) ) self.parent.assertTrue(hasattr(a_, """neck_hidden_sizes""" ) ) self.parent.assertTrue(hasattr(a_, """num_attention_heads""" ) ) class lowercase: '''simple docstring''' def __init__( self: Dict, a_: str, a_: Tuple=13, a_: List[str]=32, a_: Dict=2, a_: Optional[int]=3, a_: Union[str, Any]=640, a_: Tuple=4, a_: Optional[Any]="silu", a_: Tuple=3, a_: str=32, a_: Optional[Any]=0.1, a_: Any=0.1, a_: Optional[Any]=0.1, a_: int=0.02, a_: int=True, a_: int=True, a_: Union[str, Any]=10, a_: Optional[Any]=None, ): '''simple docstring''' _snake_case : List[Any] = parent _snake_case : Tuple = batch_size _snake_case : Any = image_size _snake_case : List[Any] = patch_size _snake_case : List[Any] = num_channels _snake_case : Optional[Any] = last_hidden_size _snake_case : Tuple = num_attention_heads _snake_case : Optional[Any] = hidden_act _snake_case : int = conv_kernel_size _snake_case : Dict = output_stride _snake_case : Any = hidden_dropout_prob _snake_case : int = attention_probs_dropout_prob _snake_case : Optional[Any] = classifier_dropout_prob _snake_case : str = use_labels _snake_case : List[Any] = is_training _snake_case : Any = num_labels _snake_case : Union[str, Any] = initializer_range _snake_case : Any = scope def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : Dict = None _snake_case : str = None if self.use_labels: _snake_case : Optional[int] = ids_tensor([self.batch_size], self.num_labels ) _snake_case : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) _snake_case : Union[str, Any] = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self: Any ): '''simple docstring''' return MobileViTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, ) def UpperCamelCase_ ( self: List[str], a_: Any, a_: Optional[int], a_: Dict, a_: Any ): '''simple docstring''' _snake_case : Union[str, Any] = MobileViTModel(config=a_ ) model.to(a_ ) model.eval() _snake_case : Optional[Any] = 
model(a_ ) self.parent.assertEqual( result.last_hidden_state.shape, ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def UpperCamelCase_ ( self: Dict, a_: Optional[int], a_: Tuple, a_: List[str], a_: List[Any] ): '''simple docstring''' _snake_case : Optional[Any] = self.num_labels _snake_case : Dict = MobileViTForImageClassification(a_ ) model.to(a_ ) model.eval() _snake_case : Dict = model(a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self: str, a_: Any, a_: Union[str, Any], a_: Tuple, a_: int ): '''simple docstring''' _snake_case : Optional[Any] = self.num_labels _snake_case : Tuple = MobileViTForSemanticSegmentation(a_ ) model.to(a_ ) model.eval() _snake_case : int = model(a_ ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) _snake_case : Tuple = model(a_, labels=a_ ) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Any = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case , _snake_case : Any = config_and_inputs _snake_case : Any = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) lowercase__ = ( { "feature-extraction": MobileViTModel, "image-classification": MobileViTForImageClassification, "image-segmentation": MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Optional[int] = MobileViTModelTester(self ) _snake_case : Dict = MobileViTConfigTester(self, config_class=a_, has_text_modality=a_ ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""MobileViT does not use inputs_embeds""" ) def UpperCamelCase_ ( self: int ): '''simple docstring''' pass @unittest.skip(reason="""MobileViT does not support input and output embeddings""" ) def UpperCamelCase_ ( self: int ): '''simple docstring''' pass @unittest.skip(reason="""MobileViT does not output attentions""" ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' pass def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Any = model_class(a_ ) _snake_case : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : List[Any] = [*signature.parameters.keys()] _snake_case : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1], a_ ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' pass def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : int = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' def check_hidden_states_output(a_: Union[str, Any], a_: int, a_: int ): _snake_case : Union[str, Any] = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): _snake_case : Union[str, Any] = model(**self._prepare_for_class(a_, a_ ) ) _snake_case : List[Any] = outputs.hidden_states _snake_case : Any = 5 self.assertEqual(len(a_ ), a_ ) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. _snake_case : Union[str, Any] = 2 for i in range(len(a_ ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], ) divisor *= 2 self.assertEqual(self.model_tester.output_stride, divisor // 2 ) _snake_case , _snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : List[Any] = True check_hidden_states_output(a_, a_, a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case : str = True check_hidden_states_output(a_, a_, a_ ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*a_ ) @slow def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[str] = MobileViTModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowercase( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' return MobileViTImageProcessor.from_pretrained("""apple/mobilevit-xx-small""" ) if is_vision_available() else None @slow def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Optional[int] = MobileViTForImageClassification.from_pretrained("""apple/mobilevit-xx-small""" ).to(a_ ) _snake_case : Union[str, Any] = self.default_image_processor _snake_case : Optional[int] = prepare_img() _snake_case : List[Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Union[str, Any] = model(**a_ ) # verify the logits _snake_case : List[str] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : str = torch.tensor([-1.9_364, -1.2_327, -0.4_653] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3], a_, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" ) _snake_case : Union[str, Any] = model.to(a_ ) _snake_case : int = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" ) _snake_case : List[Any] = prepare_img() _snake_case : str = image_processor(images=a_, 
return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Dict = model(**a_ ) _snake_case : Union[str, Any] = outputs.logits # verify the logits _snake_case : List[Any] = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape, a_ ) _snake_case : Optional[Any] = torch.tensor( [ [[6.9_713, 6.9_786, 7.2_422], [7.2_893, 7.2_825, 7.4_446], [7.6_580, 7.8_797, 7.9_420]], [[-10.6_869, -10.3_250, -10.3_471], [-10.4_228, -9.9_868, -9.7_132], [-11.0_405, -11.0_221, -10.7_318]], [[-3.3_089, -2.8_539, -2.6_740], [-3.2_706, -2.5_621, -2.5_108], [-3.2_534, -2.6_615, -2.6_651]], ], device=a_, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], a_, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : List[Any] = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" ) _snake_case : Dict = model.to(a_ ) _snake_case : str = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" ) _snake_case : Any = prepare_img() _snake_case : Tuple = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Tuple = model(**a_ ) _snake_case : Union[str, Any] = outputs.logits.detach().cpu() _snake_case : Optional[int] = image_processor.post_process_semantic_segmentation(outputs=a_, target_sizes=[(50, 60)] ) _snake_case : str = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape, a_ ) _snake_case : Optional[int] = image_processor.post_process_semantic_segmentation(outputs=a_ ) _snake_case : Optional[Any] = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape, a_ )
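# Hedged recomputation of the hidden-state shape check in the MobileViT test
# above: the model halves the spatial resolution at each of its 5 reported
# stages, so with the tester's image_size=32 the feature maps shrink
# 16 -> 8 -> 4 -> 2 -> 1 and the final output stride is 2**5 = 32.
image_size, num_stages = 32, 5
sizes = [image_size // (2 ** (i + 1)) for i in range(num_stages)]
assert sizes == [16, 8, 4, 2, 1]
assert 2 ** num_stages == 32  # matches the tester's output_stride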
"""simple docstring""" from abc import ABC, abstractmethod from argparse import ArgumentParser class lowercase( __a ): '''simple docstring''' @staticmethod @abstractmethod def UpperCamelCase_ ( a_: ArgumentParser ): '''simple docstring''' raise NotImplementedError() @abstractmethod def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' raise NotImplementedError()
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer A_ = logging.get_logger(__name__) A_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} A_ = { '''vocab_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''', '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-german-cased''': ( '''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json''' ), '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json''' ), }, } A_ = { '''distilbert-base-uncased''': 5_12, '''distilbert-base-uncased-distilled-squad''': 5_12, '''distilbert-base-cased''': 5_12, '''distilbert-base-cased-distilled-squad''': 5_12, '''distilbert-base-german-cased''': 5_12, '''distilbert-base-multilingual-cased''': 5_12, } A_ = { '''distilbert-base-uncased''': {'''do_lower_case''': True}, '''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True}, '''distilbert-base-cased''': {'''do_lower_case''': False}, '''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False}, '''distilbert-base-german-cased''': {'''do_lower_case''': False}, '''distilbert-base-multilingual-cased''': {'''do_lower_case''': False}, } class lowercase( __a ): '''simple docstring''' lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = PRETRAINED_INIT_CONFIGURATION lowercase__ = ["input_ids", "attention_mask"] lowercase__ = DistilBertTokenizer def __init__( self: int, a_: Union[str, Any]=None, a_: int=None, a_: str=True, a_: Dict="[UNK]", a_: List[str]="[SEP]", a_: Dict="[PAD]", a_: Union[str, Any]="[CLS]", a_: Dict="[MASK]", a_: Optional[Any]=True, a_: List[Any]=None, **a_: Optional[int], ): '''simple docstring''' super().__init__( a_, tokenizer_file=a_, do_lower_case=a_, unk_token=a_, sep_token=a_, pad_token=a_, cls_token=a_, mask_token=a_, tokenize_chinese_chars=a_, strip_accents=a_, **a_, ) _snake_case : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""", a_ ) != 
do_lower_case or normalizer_state.get("""strip_accents""", a_ ) != strip_accents or normalizer_state.get("""handle_chinese_chars""", a_ ) != tokenize_chinese_chars ): _snake_case : Optional[Any] = getattr(a_, normalizer_state.pop("""type""" ) ) _snake_case : List[Any] = do_lower_case _snake_case : Optional[Any] = strip_accents _snake_case : Optional[int] = tokenize_chinese_chars _snake_case : Dict = normalizer_class(**a_ ) _snake_case : int = do_lower_case def UpperCamelCase_ ( self: Tuple, a_: Union[str, Any], a_: str=None ): '''simple docstring''' _snake_case : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase_ ( self: Optional[int], a_: List[int], a_: Optional[List[int]] = None ): '''simple docstring''' _snake_case : List[Any] = [self.sep_token_id] _snake_case : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: Optional[str] = None ): '''simple docstring''' _snake_case : Optional[Any] = self._tokenizer.model.save(a_, name=a_ ) return tuple(a_ )
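# Hedged sketch of the two sequence-pair helpers in the tokenizer above: for
# ids A=[5, 6] and B=[7], with cls_token_id=101 and sep_token_id=102
# (placeholder values, not necessarily DistilBERT's actual vocab ids), the
# methods produce the classic BERT layout and its segment ids.
cls_id, sep_id = 101, 102
a, b = [5, 6], [7]
input_ids = [cls_id] + a + [sep_id] + b + [sep_id]
token_type_ids = [0] * (len(a) + 2) + [1] * (len(b) + 1)
assert input_ids == [101, 5, 6, 102, 7, 102]
assert token_type_ids == [0, 0, 0, 0, 1, 1]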
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''', '''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''', '''junnyu/roformer_chinese_char_small''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json''' ), '''junnyu/roformer_chinese_char_base''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json''' ), '''junnyu/roformer_small_discriminator''': ( '''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json''' ), '''junnyu/roformer_small_generator''': ( '''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json''' ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class lowercase( __a ): '''simple docstring''' lowercase__ = "roformer" def __init__( self: List[str], a_: Tuple=50_000, a_: Optional[Any]=None, a_: List[str]=768, a_: Union[str, Any]=12, a_: Optional[int]=12, a_: Optional[Any]=3_072, a_: List[str]="gelu", a_: List[str]=0.1, a_: Tuple=0.1, a_: Optional[int]=1_536, a_: Any=2, a_: Optional[int]=0.02, a_: Tuple=1E-12, a_: Dict=0, a_: str=False, a_: Dict=True, **a_: Dict, ): '''simple docstring''' super().__init__(pad_token_id=a_, **a_ ) _snake_case : int = vocab_size _snake_case : int = hidden_size if embedding_size is None else embedding_size _snake_case : Dict = hidden_size _snake_case : Optional[int] = num_hidden_layers _snake_case : Any = num_attention_heads _snake_case : Dict = hidden_act _snake_case : Optional[int] = intermediate_size _snake_case : List[Any] = hidden_dropout_prob _snake_case : Union[str, Any] = attention_probs_dropout_prob _snake_case : Any = max_position_embeddings _snake_case : Tuple = type_vocab_size _snake_case : List[Any] = initializer_range _snake_case : List[Any] = layer_norm_eps _snake_case : Optional[Any] = rotary_value _snake_case : List[str] = use_cache class lowercase( __a ): '''simple docstring''' @property def UpperCamelCase_ ( self: Dict ): '''simple docstring''' if self.task == "multiple-choice": _snake_case : str = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _snake_case : List[str] = {0: """batch""", 1: """sequence"""} _snake_case : List[Any] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
"""simple docstring""" from collections import defaultdict from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def UpperCAmelCase__ (): """simple docstring""" _snake_case , _snake_case : Any = 9, 14 # noqa: F841 _snake_case : Any = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] _snake_case : int = defaultdict(snake_case__ ) for nodea, nodea, cost in edges: adjancency[nodea].append([nodea, cost] ) adjancency[nodea].append([nodea, cost] ) _snake_case : Union[str, Any] = mst(snake_case__ ) _snake_case : Any = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] for answer in expected: _snake_case : Union[str, Any] = tuple(answer[:2] ) _snake_case : Optional[Any] = tuple(edge[::-1] ) assert edge in result or reverse in result
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Union[str, Any]=1 ): """simple docstring""" if n_shave_prefix_segments >= 0: return ".".join(path.split(""".""" )[n_shave_prefix_segments:] ) else: return ".".join(path.split(""".""" )[:n_shave_prefix_segments] ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[Any]=0 ): """simple docstring""" _snake_case : Optional[Any] = [] for old_item in old_list: _snake_case : Union[str, Any] = old_item.replace("""in_layers.0""" , """norm1""" ) _snake_case : List[Any] = new_item.replace("""in_layers.2""" , """conv1""" ) _snake_case : Tuple = new_item.replace("""out_layers.0""" , """norm2""" ) _snake_case : Dict = new_item.replace("""out_layers.3""" , """conv2""" ) _snake_case : int = new_item.replace("""emb_layers.1""" , """time_emb_proj""" ) _snake_case : Optional[Any] = new_item.replace("""skip_connection""" , """conv_shortcut""" ) _snake_case : str = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Dict=0 ): """simple docstring""" _snake_case : Dict = [] for old_item in old_list: _snake_case : Dict = old_item _snake_case : int = new_item.replace("""norm.weight""" , """group_norm.weight""" ) _snake_case : str = new_item.replace("""norm.bias""" , """group_norm.bias""" ) _snake_case : Optional[Any] = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" ) _snake_case : Optional[Any] = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" ) _snake_case : Optional[Any] = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : str=None , snake_case__ : str=None , snake_case__ : List[str]=None ): """simple docstring""" assert isinstance(snake_case__ , snake_case__ ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _snake_case : Union[str, Any] = old_checkpoint[path] _snake_case : Optional[int] = old_tensor.shape[0] // 3 _snake_case : List[Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) _snake_case : Union[str, Any] = old_tensor.shape[0] // config["""num_head_channels"""] // 3 _snake_case : Any = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _snake_case , _snake_case , _snake_case : List[str] = old_tensor.split(channels // num_heads , dim=1 ) _snake_case : Union[str, Any] = query.reshape(snake_case__ ) _snake_case : Tuple = key.reshape(snake_case__ ) _snake_case : Any = value.reshape(snake_case__ ) for path in paths: _snake_case : List[Any] = path["""new"""] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _snake_case : Union[str, Any] = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" ) _snake_case : str = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" ) _snake_case : Any = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" ) if additional_replacements is not None: for replacement in additional_replacements: _snake_case : int = new_path.replace(replacement["""old"""] , replacement["""new"""] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _snake_case : Dict = old_checkpoint[path["""old"""]][:, :, 0] else: _snake_case : Optional[Any] = old_checkpoint[path["""old"""]] def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : List[str] ): """simple docstring""" _snake_case : int = {} _snake_case : Tuple = checkpoint["""time_embed.0.weight"""] _snake_case : List[str] = checkpoint["""time_embed.0.bias"""] _snake_case : List[str] = checkpoint["""time_embed.2.weight"""] _snake_case : Tuple = checkpoint["""time_embed.2.bias"""] _snake_case : Dict = checkpoint["""input_blocks.0.0.weight"""] _snake_case : List[Any] = checkpoint["""input_blocks.0.0.bias"""] _snake_case : List[Any] = checkpoint["""out.0.weight"""] _snake_case : Any = checkpoint["""out.0.bias"""] _snake_case : Any = checkpoint["""out.2.weight"""] _snake_case : List[str] = checkpoint["""out.2.bias"""] # Retrieves the keys for the input blocks only _snake_case : List[str] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} ) _snake_case : Any = { layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key] for layer_id in range(snake_case__ ) } # Retrieves the keys for the middle blocks only _snake_case : Optional[int] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} ) _snake_case : Optional[int] = { layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key] for layer_id in range(snake_case__ ) } # Retrieves the keys for the output blocks only _snake_case : str = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} ) _snake_case : List[Any] = { layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key] for layer_id in range(snake_case__ ) } for i in range(1 , snake_case__ ): _snake_case : Union[str, Any] = (i - 1) // (config["""num_res_blocks"""] + 1) _snake_case : int = (i - 1) % (config["""num_res_blocks"""] + 1) _snake_case : List[str] = [key for key in input_blocks[i] if 
F"input_blocks.{i}.0" in key] _snake_case : str = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key] if F"input_blocks.{i}.0.op.weight" in checkpoint: _snake_case : Union[str, Any] = checkpoint[ F"input_blocks.{i}.0.op.weight" ] _snake_case : Dict = checkpoint[ F"input_blocks.{i}.0.op.bias" ] continue _snake_case : Optional[int] = renew_resnet_paths(snake_case__ ) _snake_case : int = {"""old""": F"input_blocks.{i}.0", """new""": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"} _snake_case : Tuple = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""} assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path, resnet_op] , config=snake_case__ ) if len(snake_case__ ): _snake_case : str = renew_attention_paths(snake_case__ ) _snake_case : List[str] = { """old""": F"input_blocks.{i}.1", """new""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}", } _snake_case : Optional[int] = { F"input_blocks.{i}.1.qkv.bias": { """key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", """query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", """value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, F"input_blocks.{i}.1.qkv.weight": { """key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", """query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", """value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case__ , config=snake_case__ , ) _snake_case : int = middle_blocks[0] _snake_case : List[str] = middle_blocks[1] _snake_case : Any = middle_blocks[2] _snake_case : Dict = renew_resnet_paths(snake_case__ ) assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ ) _snake_case : Any = renew_resnet_paths(snake_case__ ) assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ ) _snake_case : Dict = renew_attention_paths(snake_case__ ) _snake_case : Tuple = { """middle_block.1.qkv.bias""": { """key""": """mid_block.attentions.0.key.bias""", """query""": """mid_block.attentions.0.query.bias""", """value""": """mid_block.attentions.0.value.bias""", }, """middle_block.1.qkv.weight""": { """key""": """mid_block.attentions.0.key.weight""", """query""": """mid_block.attentions.0.query.weight""", """value""": """mid_block.attentions.0.value.weight""", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , attention_paths_to_split=snake_case__ , config=snake_case__ ) for i in range(snake_case__ ): _snake_case : Optional[Any] = i // (config["""num_res_blocks"""] + 1) _snake_case : Dict = i % (config["""num_res_blocks"""] + 1) _snake_case : List[str] = [shave_segments(snake_case__ , 2 ) for name in output_blocks[i]] _snake_case : Any = {} for layer in output_block_layers: _snake_case , _snake_case : Any = layer.split(""".""" )[0], shave_segments(snake_case__ , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(snake_case__ ) else: _snake_case : str = [layer_name] if len(snake_case__ ) > 1: _snake_case : Dict = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key] _snake_case : List[str] = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key] _snake_case : List[Any] = renew_resnet_paths(snake_case__ ) _snake_case : int = 
renew_resnet_paths(snake_case__ ) _snake_case : Optional[Any] = {"""old""": F"output_blocks.{i}.0", """new""": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _snake_case : str = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] ) _snake_case : Any = checkpoint[ F"output_blocks.{i}.{index}.conv.weight" ] _snake_case : Optional[int] = checkpoint[ F"output_blocks.{i}.{index}.conv.bias" ] # Clear attentions as they have been attributed above. if len(snake_case__ ) == 2: _snake_case : Any = [] if len(snake_case__ ): _snake_case : str = renew_attention_paths(snake_case__ ) _snake_case : str = { """old""": F"output_blocks.{i}.1", """new""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}", } _snake_case : int = { F"output_blocks.{i}.1.qkv.bias": { """key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", """query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", """value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, F"output_blocks.{i}.1.qkv.weight": { """key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", """query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", """value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=snake_case__ , ) else: _snake_case : Optional[Any] = renew_resnet_paths(snake_case__ , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _snake_case : Optional[Any] = """.""".join(["""output_blocks""", str(snake_case__ ), path["""old"""]] ) _snake_case : Optional[int] = """.""".join(["""up_blocks""", str(snake_case__ ), """resnets""", str(snake_case__ ), path["""new"""]] ) _snake_case : Any = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": A_ = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') A_ = parser.parse_args() A_ = torch.load(args.checkpoint_path) with open(args.config_file) as f: A_ = json.loads(f.read()) A_ = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] A_ = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: A_ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) A_ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) A_ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
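# Hedged sketch of the fused-qkv split performed in assign_to_checkpoint()
# above: a fused (3*C, ...) projection is reshaped per attention head and then
# split into equal query/key/value chunks. Shapes only; values are random.
import torch

channels, num_heads = 12, 2
fused = torch.randn(3 * channels, 4)                    # stand-in for an old qkv weight
per_head = fused.reshape(num_heads, 3 * channels // num_heads, 4)
q, k, v = per_head.split(channels // num_heads, dim=1)  # each chunk is (2, 6, 4)
assert q.shape == k.shape == v.shape == (num_heads, channels // num_heads, 4)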
"""simple docstring""" import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() A_ = logging.get_logger(__name__) def UpperCAmelCase__ (snake_case__ : Optional[int] ): """simple docstring""" print("""Loading config file...""" ) def flatten_yaml_as_dict(snake_case__ : List[Any] , snake_case__ : Optional[Any]="" , snake_case__ : Tuple="." ): _snake_case : Union[str, Any] = [] for k, v in d.items(): _snake_case : List[str] = parent_key + sep + k if parent_key else k if isinstance(snake_case__ , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(snake_case__ , snake_case__ , sep=snake_case__ ).items() ) else: items.append((new_key, v) ) return dict(snake_case__ ) _snake_case : Dict = argparse.Namespace() with open(snake_case__ , """r""" ) as yaml_file: try: _snake_case : List[Any] = yaml.load(snake_case__ , Loader=yaml.FullLoader ) _snake_case : Any = flatten_yaml_as_dict(snake_case__ ) for k, v in flat_cfg.items(): setattr(snake_case__ , snake_case__ , snake_case__ ) except yaml.YAMLError as exc: logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case__ , str(snake_case__ ) ) ) return config def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ): """simple docstring""" _snake_case : Dict = MobileViTVaConfig() _snake_case : Optional[int] = False # dataset if task_name.startswith("""imagenet1k_""" ): _snake_case : Dict = 10_00 if int(task_name.strip().split("""_""" )[-1] ) == 3_84: _snake_case : Union[str, Any] = 3_84 else: _snake_case : Optional[Any] = 2_56 _snake_case : str = """imagenet-1k-id2label.json""" elif task_name.startswith("""imagenet21k_to_1k_""" ): _snake_case : str = 2_10_00 if int(task_name.strip().split("""_""" )[-1] ) == 3_84: _snake_case : Dict = 3_84 else: _snake_case : Union[str, Any] = 2_56 _snake_case : Tuple = """imagenet-22k-id2label.json""" elif task_name.startswith("""ade20k_""" ): _snake_case : Tuple = 1_51 _snake_case : str = 5_12 _snake_case : List[Any] = """ade20k-id2label.json""" _snake_case : Union[str, Any] = True elif task_name.startswith("""voc_""" ): _snake_case : List[Any] = 21 _snake_case : List[str] = 5_12 _snake_case : int = """pascal-voc-id2label.json""" _snake_case : int = True # orig_config _snake_case : int = load_orig_config_file(snake_case__ ) assert getattr(snake_case__ , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model" _snake_case : str = getattr(snake_case__ , """model.classification.mitv2.width_multiplier""" , 1.0 ) assert ( getattr(snake_case__ , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" _snake_case : int = getattr(snake_case__ , """model.classification.activation.name""" , """swish""" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: _snake_case : Tuple = getattr(snake_case__ , """model.segmentation.output_stride""" , 16 ) if "_deeplabv3" in task_name: _snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] ) _snake_case : Tuple = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 ) _snake_case : Any = 
getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 ) # id2label _snake_case : Union[str, Any] = """huggingface/label-files""" _snake_case : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) ) _snake_case : List[Any] = {int(snake_case__ ): v for k, v in idalabel.items()} _snake_case : Tuple = idalabel _snake_case : Any = {v: k for k, v in idalabel.items()} return config def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[Any] ): """simple docstring""" _snake_case : List[str] = dct.pop(snake_case__ ) _snake_case : List[Any] = val def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : int=False ): """simple docstring""" if base_model: _snake_case : Any = """""" else: _snake_case : Union[str, Any] = """mobilevitv2.""" _snake_case : Dict = [] for k in state_dict.keys(): if k[:8] == "encoder.": _snake_case : List[str] = k[8:] else: _snake_case : str = k if ".block." in k: _snake_case : Optional[int] = k_new.replace(""".block.""" , """.""" ) if ".conv." in k: _snake_case : Union[str, Any] = k_new.replace(""".conv.""" , """.convolution.""" ) if ".norm." in k: _snake_case : str = k_new.replace(""".norm.""" , """.normalization.""" ) if "conv_1." in k: _snake_case : int = k_new.replace("""conv_1.""" , F"{model_prefix}conv_stem." ) for i in [1, 2]: if F"layer_{i}." in k: _snake_case : Tuple = k_new.replace(F"layer_{i}." , F"{model_prefix}encoder.layer.{i-1}.layer." ) if ".exp_1x1." in k: _snake_case : Optional[Any] = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" ) if ".red_1x1." in k: _snake_case : Optional[Any] = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" ) for i in [3, 4, 5]: if F"layer_{i}.0." in k: _snake_case : Tuple = k_new.replace(F"layer_{i}.0." , F"{model_prefix}encoder.layer.{i-1}.downsampling_layer." ) if F"layer_{i}.1.local_rep.0." in k: _snake_case : Any = k_new.replace(F"layer_{i}.1.local_rep.0." , F"{model_prefix}encoder.layer.{i-1}.conv_kxk." ) if F"layer_{i}.1.local_rep.1." in k: _snake_case : str = k_new.replace(F"layer_{i}.1.local_rep.1." , F"{model_prefix}encoder.layer.{i-1}.conv_1x1." ) for i in [3, 4, 5]: if i == 3: _snake_case : Optional[Any] = [0, 1] elif i == 4: _snake_case : Any = [0, 1, 2, 3] elif i == 5: _snake_case : List[Any] = [0, 1, 2] for j in j_in: if F"layer_{i}.1.global_rep.{j}." in k: _snake_case : Any = k_new.replace( F"layer_{i}.1.global_rep.{j}." , F"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." ) if F"layer_{i}.1.global_rep.{j+1}." in k: _snake_case : List[Any] = k_new.replace( F"layer_{i}.1.global_rep.{j+1}." , F"{model_prefix}encoder.layer.{i-1}.layernorm." ) if F"layer_{i}.1.conv_proj." in k: _snake_case : Union[str, Any] = k_new.replace(F"layer_{i}.1.conv_proj." , F"{model_prefix}encoder.layer.{i-1}.conv_projection." ) if "pre_norm_attn.0." in k: _snake_case : List[Any] = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" ) if "pre_norm_attn.1." in k: _snake_case : Optional[int] = k_new.replace("""pre_norm_attn.1.""" , """attention.""" ) if "pre_norm_ffn.0." in k: _snake_case : List[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" ) if "pre_norm_ffn.1." in k: _snake_case : Tuple = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" ) if "pre_norm_ffn.3." in k: _snake_case : Any = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" ) if "classifier.1." in k: _snake_case : List[str] = k_new.replace("""classifier.1.""" , """classifier.""" ) if "seg_head." 
in k: _snake_case : str = k_new.replace("""seg_head.""" , """segmentation_head.""" ) if ".aspp_layer." in k: _snake_case : Optional[int] = k_new.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in k: _snake_case : int = k_new.replace(""".aspp_pool.""" , """.""" ) rename_keys.append((k, k_new) ) return rename_keys def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" _snake_case : List[str] = [] for k in state_dict.keys(): if k.startswith("""seg_head.aux_head.""" ): keys_to_ignore.append(snake_case__ ) for k in keys_to_ignore: state_dict.pop(snake_case__ , snake_case__ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg""" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" _snake_case : Any = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) return im @torch.no_grad() def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Tuple ): """simple docstring""" _snake_case : int = get_mobilevitva_config(snake_case__ , snake_case__ ) # load original state_dict _snake_case : Optional[int] = torch.load(snake_case__ , map_location="""cpu""" ) # load huggingface model if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ): _snake_case : Any = MobileViTVaForSemanticSegmentation(snake_case__ ).eval() _snake_case : List[Any] = False else: _snake_case : List[Any] = MobileViTVaForImageClassification(snake_case__ ).eval() _snake_case : Optional[Any] = False # remove and rename some keys of load the original model _snake_case : Union[str, Any] = checkpoint remove_unused_keys(snake_case__ ) _snake_case : List[str] = create_rename_keys(snake_case__ , base_model=snake_case__ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) # load modified state_dict model.load_state_dict(snake_case__ ) # Check outputs on an image, prepared by MobileViTImageProcessor _snake_case : Optional[int] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) _snake_case : List[str] = image_processor(images=prepare_img() , return_tensors="""pt""" ) _snake_case : Optional[Any] = model(**snake_case__ ) # verify classification model if task_name.startswith("""imagenet""" ): _snake_case : List[str] = outputs.logits _snake_case : Any = logits.argmax(-1 ).item() print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] ) if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0: # expected_logits for base variant _snake_case : List[str] = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] ) assert torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) print(F"Saving model {task_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(snake_case__ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(snake_case__ ) if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''imagenet1k_256''', type=str, help=( '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . 
''' ''' Classification (ImageNet-1k) - MobileViTV2 (256x256) : imagenet1k_256 - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384 - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : imagenet21k_to_1k_256 - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on ImageNet-1k 384x384) : imagenet21k_to_1k_384 Segmentation - ADE20K Dataset : ade20k_deeplabv3 - Pascal VOC 2012 Dataset: voc_deeplabv3 ''' ), choices=[ '''imagenet1k_256''', '''imagenet1k_384''', '''imagenet21k_to_1k_256''', '''imagenet21k_to_1k_384''', '''ade20k_deeplabv3''', '''voc_deeplabv3''', ], ) parser.add_argument( '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) A_ = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
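# Hedged sketch of the rename_key() helper used in the conversion script
# above: state-dict renaming is just pop-and-reinsert under the new name,
# here for the "seg_head." -> "segmentation_head." rule from create_rename_keys().
state_dict = {"seg_head.classifier.weight": 1.0}
val = state_dict.pop("seg_head.classifier.weight")
state_dict["segmentation_head.classifier.weight"] = val
assert "segmentation_head.classifier.weight" in state_dict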
"""simple docstring""" from typing import Any def UpperCAmelCase__ (snake_case__ : list ): """simple docstring""" if not input_list: return [] _snake_case : List[Any] = [input_list.count(snake_case__ ) for value in input_list] _snake_case : Optional[int] = max(snake_case__ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(snake_case__ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import math class lowercase: '''simple docstring''' def UpperCamelCase_ ( self: Union[str, Any], a_: list[list[float]], a_: list[int] ): '''simple docstring''' _snake_case : str = 0.0 _snake_case : Optional[int] = 0.0 for i in range(len(a_ ) ): da += math.pow((sample[i] - weights[0][i]), 2 ) da += math.pow((sample[i] - weights[1][i]), 2 ) return 0 if da > da else 1 return 0 def UpperCamelCase_ ( self: Tuple, a_: list[list[int | float]], a_: list[int], a_: int, a_: float ): '''simple docstring''' for i in range(len(a_ ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def UpperCAmelCase__ (): """simple docstring""" _snake_case : List[str] = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) _snake_case : Dict = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training _snake_case : Any = SelfOrganizingMap() _snake_case : Dict = 3 _snake_case : str = 0.5 for _ in range(snake_case__ ): for j in range(len(snake_case__ ) ): # training sample _snake_case : List[Any] = training_samples[j] # Compute the winning vector _snake_case : Optional[Any] = self_organizing_map.get_winner(snake_case__ , snake_case__ ) # Update the winning vector _snake_case : List[Any] = self_organizing_map.update(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # classify test sample _snake_case : Tuple = [0, 0, 0, 1] _snake_case : Dict = self_organizing_map.get_winner(snake_case__ , snake_case__ ) # results print(F"Clusters that the test sample belongs to : {winner}" ) print(F"Weights that have been trained : {weights}" ) # running the main() function if __name__ == "__main__": main()
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''', '''BridgeTower/bridgetower-base-itm-mlm''': ( '''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json''' ), } class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower_vision_model" def __init__( self: Tuple, a_: str=768, a_: Union[str, Any]=12, a_: List[str]=3, a_: Optional[int]=16, a_: List[Any]=288, a_: Optional[Any]=1, a_: Any=1E-05, a_: Dict=False, a_: Any=True, a_: int=False, **a_: int, ): '''simple docstring''' super().__init__(**a_ ) _snake_case : str = hidden_size _snake_case : int = num_hidden_layers _snake_case : Any = num_channels _snake_case : Union[str, Any] = patch_size _snake_case : Dict = image_size _snake_case : Optional[Any] = initializer_factor _snake_case : Any = layer_norm_eps _snake_case : int = stop_gradient _snake_case : Any = share_layernorm _snake_case : List[Any] = remove_last_layer @classmethod def UpperCamelCase_ ( cls: Union[str, Any], a_: Union[str, os.PathLike], **a_: Optional[Any] ): '''simple docstring''' _snake_case , _snake_case : List[Any] = cls.get_config_dict(a_, **a_ ) if config_dict.get("""model_type""" ) == "bridgetower": _snake_case : str = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." 
) return cls.from_dict(a_, **a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower_text_model" def __init__( self: str, a_: Dict=50_265, a_: List[Any]=768, a_: Union[str, Any]=12, a_: List[str]=12, a_: str=1, a_: Optional[Any]=3_072, a_: int="gelu", a_: int=0.1, a_: int=0.1, a_: Optional[int]=514, a_: Tuple=1, a_: Tuple=1E-05, a_: Optional[int]=1, a_: Union[str, Any]=0, a_: str=2, a_: Any="absolute", a_: List[Any]=True, **a_: Union[str, Any], ): '''simple docstring''' super().__init__(**a_ ) _snake_case : str = vocab_size _snake_case : Optional[int] = hidden_size _snake_case : Dict = num_hidden_layers _snake_case : Optional[int] = num_attention_heads _snake_case : Optional[int] = hidden_act _snake_case : List[Any] = initializer_factor _snake_case : Optional[int] = intermediate_size _snake_case : int = hidden_dropout_prob _snake_case : Tuple = attention_probs_dropout_prob _snake_case : List[str] = max_position_embeddings _snake_case : Optional[int] = type_vocab_size _snake_case : List[Any] = layer_norm_eps _snake_case : Dict = position_embedding_type _snake_case : Dict = use_cache _snake_case : int = pad_token_id _snake_case : Union[str, Any] = bos_token_id _snake_case : Union[str, Any] = eos_token_id @classmethod def UpperCamelCase_ ( cls: str, a_: Union[str, os.PathLike], **a_: int ): '''simple docstring''' _snake_case , _snake_case : Optional[int] = cls.get_config_dict(a_, **a_ ) if config_dict.get("""model_type""" ) == "bridgetower": _snake_case : Union[str, Any] = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(a_, **a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower" def __init__( self: int, a_: List[str]=True, a_: Any="gelu", a_: List[Any]=768, a_: int=1, a_: Optional[int]=1E-05, a_: Tuple=False, a_: Optional[Any]="add", a_: List[str]=12, a_: Union[str, Any]=6, a_: int=False, a_: Any=False, a_: Dict=None, a_: Any=None, **a_: str, ): '''simple docstring''' _snake_case : str = kwargs.pop("""text_config_dict""", a_ ) _snake_case : Optional[Any] = kwargs.pop("""vision_config_dict""", a_ ) super().__init__(**a_ ) _snake_case : str = share_cross_modal_transformer_layers _snake_case : Any = hidden_act _snake_case : Union[str, Any] = hidden_size _snake_case : Union[str, Any] = initializer_factor _snake_case : Dict = layer_norm_eps _snake_case : Dict = share_link_tower_layers _snake_case : Optional[int] = link_tower_type _snake_case : Any = num_attention_heads _snake_case : int = num_hidden_layers _snake_case : int = tie_word_embeddings _snake_case : Optional[Any] = init_layernorm_from_vision_encoder if text_config is None: _snake_case : Optional[Any] = {} logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" ) if vision_config is None: _snake_case : str = {} logger.info("""`vision_config` is `None`. 
Initializing the `BridgeTowerVisionConfig` with default values.""" ) _snake_case : Any = BridgeTowerTextConfig(**a_ ) _snake_case : List[Any] = BridgeTowerVisionConfig(**a_ ) @classmethod def UpperCamelCase_ ( cls: Union[str, Any], a_: BridgeTowerTextConfig, a_: BridgeTowerVisionConfig, **a_: Optional[Any] ): '''simple docstring''' return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[int] = copy.deepcopy(self.__dict__ ) _snake_case : str = self.text_config.to_dict() _snake_case : List[str] = self.vision_config.to_dict() _snake_case : Tuple = self.__class__.model_type return output
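A minimal composition sketch for the configs above; the classmethod name and defaults follow the reconstruction in this file, so treat the snippet as illustrative rather than canonical:

text_config = BridgeTowerTextConfig()      # RoBERTa-like defaults (vocab 50265)
vision_config = BridgeTowerVisionConfig()  # ViT-like defaults (288px images, patch 16)
config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
assert config.to_dict()["model_type"] == "bridgetower"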
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class lowercase( unittest.TestCase ): '''simple docstring''' def __init__( self: Tuple, a_: Optional[Any], a_: Optional[Any]=7, a_: Any=3, a_: Tuple=18, a_: int=30, a_: Union[str, Any]=400, a_: List[Any]=True, a_: int=None, a_: Optional[Any]=True, a_: List[str]=None, a_: Optional[Any]=True, a_: Tuple=[0.48_145_466, 0.4_578_275, 0.40_821_073], a_: Optional[int]=[0.26_862_954, 0.26_130_258, 0.27_577_711], a_: Union[str, Any]=True, ): '''simple docstring''' _snake_case : str = size if size is not None else {"""height""": 224, """width""": 224} _snake_case : str = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} _snake_case : Tuple = parent _snake_case : List[str] = batch_size _snake_case : Tuple = num_channels _snake_case : int = image_size _snake_case : Dict = min_resolution _snake_case : Optional[Any] = max_resolution _snake_case : Any = do_resize _snake_case : Tuple = size _snake_case : int = do_center_crop _snake_case : List[Any] = crop_size _snake_case : Optional[Any] = do_normalize _snake_case : Optional[Any] = image_mean _snake_case : Dict = image_std _snake_case : str = do_convert_rgb def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def UpperCamelCase_ ( self: Union[str, Any], a_: Any=False, a_: List[str]=False, a_: Union[str, Any]=False ): '''simple docstring''' assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: _snake_case : List[Any] = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uinta ) ) else: _snake_case : Any = [] for i in range(self.batch_size ): _snake_case , _snake_case : Union[str, Any] = np.random.choice(np.arange(self.min_resolution, self.max_resolution ), 2 ) image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension _snake_case : str = [Image.fromarray(np.moveaxis(a_, 0, -1 ) ) for x in image_inputs] if torchify: _snake_case : Dict = [torch.from_numpy(a_ ) for x in image_inputs] return image_inputs @require_torch @require_vision class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Tuple = ChineseCLIPImageProcessingTester(self, do_center_crop=a_ ) @property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_, """do_resize""" ) ) self.assertTrue(hasattr(a_, 
"""size""" ) ) self.assertTrue(hasattr(a_, """do_center_crop""" ) ) self.assertTrue(hasattr(a_, """center_crop""" ) ) self.assertTrue(hasattr(a_, """do_normalize""" ) ) self.assertTrue(hasattr(a_, """image_mean""" ) ) self.assertTrue(hasattr(a_, """image_std""" ) ) self.assertTrue(hasattr(a_, """do_convert_rgb""" ) ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {"""height""": 224, """width""": 224} ) self.assertEqual(image_processor.crop_size, {"""height""": 18, """width""": 18} ) _snake_case : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 ) self.assertEqual(image_processor.size, {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size, {"""height""": 84, """width""": 84} ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' pass def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _snake_case : Any = self.image_processor_tester.prepare_inputs(equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_, Image.Image ) # Test not batched input _snake_case : int = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ), ) # Test batched _snake_case : Union[str, Any] = image_processing(a_, return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ), ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _snake_case : Any = self.image_processor_tester.prepare_inputs(equal_resolution=a_, numpify=a_ ) for image in image_inputs: self.assertIsInstance(a_, np.ndarray ) # Test not batched input _snake_case : str = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ), ) # Test batched _snake_case : Optional[int] = image_processing(a_, return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ), ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _snake_case : str = self.image_processor_tester.prepare_inputs(equal_resolution=a_, torchify=a_ ) for image in image_inputs: self.assertIsInstance(a_, torch.Tensor ) # Test not batched input _snake_case : Tuple = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], 
self.image_processor_tester.crop_size["""width"""], ), ) # Test batched _snake_case : str = image_processing(a_, return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ), ) @require_torch @require_vision class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : List[str] = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=a_ ) _snake_case : Optional[int] = 3 @property def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_, """do_resize""" ) ) self.assertTrue(hasattr(a_, """size""" ) ) self.assertTrue(hasattr(a_, """do_center_crop""" ) ) self.assertTrue(hasattr(a_, """center_crop""" ) ) self.assertTrue(hasattr(a_, """do_normalize""" ) ) self.assertTrue(hasattr(a_, """image_mean""" ) ) self.assertTrue(hasattr(a_, """image_std""" ) ) self.assertTrue(hasattr(a_, """do_convert_rgb""" ) ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' pass def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _snake_case : List[str] = self.image_processor_tester.prepare_inputs(equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_, Image.Image ) # Test not batched input _snake_case : Dict = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ), ) # Test batched _snake_case : List[str] = image_processing(a_, return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ), )
"""simple docstring""" import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def UpperCAmelCase__ (): """simple docstring""" _snake_case : Optional[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png""" _snake_case : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("""RGB""" ) return image def UpperCAmelCase__ (snake_case__ : Any ): """simple docstring""" _snake_case : str = [] # fmt: off # vision encoder rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") ) rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") ) rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") ) rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) ) rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") ) # QFormer rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") ) rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") ) # fmt: on return rename_keys def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Tuple ): """simple docstring""" _snake_case : Optional[Any] = dct.pop(snake_case__ ) _snake_case : Optional[int] = val def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : str ): """simple docstring""" for i in range(config.vision_config.num_hidden_layers ): 
# read in original q and v biases _snake_case : Optional[int] = state_dict.pop(F"visual_encoder.blocks.{i}.attn.q_bias" ) _snake_case : Tuple = state_dict.pop(F"visual_encoder.blocks.{i}.attn.v_bias" ) # next, set bias in the state dict _snake_case : List[str] = torch.cat((q_bias, torch.zeros_like(snake_case__ , requires_grad=snake_case__ ), v_bias) ) _snake_case : Dict = qkv_bias def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Union[str, Any] ): """simple docstring""" _snake_case : List[Any] = 3_64 if """coco""" in model_name else 2_24 _snake_case : List[str] = BlipaVisionConfig(image_size=snake_case__ ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: _snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=snake_case__ ).to_dict() elif "opt-6.7b" in model_name: _snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=snake_case__ ).to_dict() elif "t5-xl" in model_name: _snake_case : Tuple = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: _snake_case : List[Any] = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() _snake_case : int = BlipaConfig(vision_config=snake_case__ , text_config=snake_case__ ) return config, image_size @torch.no_grad() def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int=None , snake_case__ : str=False ): """simple docstring""" _snake_case : List[str] = ( AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" ) if """opt""" in model_name else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" ) ) _snake_case : str = tokenizer("""\n""" , add_special_tokens=snake_case__ ).input_ids[0] _snake_case , _snake_case : Dict = get_blipa_config(snake_case__ , eos_token_id=snake_case__ ) _snake_case : str = BlipaForConditionalGeneration(snake_case__ ).eval() _snake_case : int = { """blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""), """blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""), """blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""), """blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""), """blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""), """blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""), """blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""), } _snake_case , _snake_case : List[Any] = model_name_to_original[model_name] # load original model print("""Loading original model...""" ) _snake_case : int = """cuda""" if torch.cuda.is_available() else """cpu""" _snake_case , _snake_case , _snake_case : Any = load_model_and_preprocess( name=snake_case__ , model_type=snake_case__ , is_eval=snake_case__ , device=snake_case__ ) original_model.eval() print("""Done!""" ) # update state dict keys _snake_case : Any = original_model.state_dict() _snake_case : Dict = create_rename_keys(snake_case__ ) for src, dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): _snake_case : str = state_dict.pop(snake_case__ ) if key.startswith("""Qformer.bert""" ): _snake_case : str = key.replace("""Qformer.bert""" , """qformer""" ) if "attention.self" in key: _snake_case : Any = key.replace("""self""" , 
"""attention""" ) if "opt_proj" in key: _snake_case : List[str] = key.replace("""opt_proj""" , """language_projection""" ) if "t5_proj" in key: _snake_case : Optional[Any] = key.replace("""t5_proj""" , """language_projection""" ) if key.startswith("""opt""" ): _snake_case : List[Any] = key.replace("""opt""" , """language""" ) if key.startswith("""t5""" ): _snake_case : List[Any] = key.replace("""t5""" , """language""" ) _snake_case : str = val # read in qv biases read_in_q_v_bias(snake_case__ , snake_case__ ) _snake_case , _snake_case : List[str] = hf_model.load_state_dict(snake_case__ , strict=snake_case__ ) assert len(snake_case__ ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] _snake_case : Any = load_demo_image() _snake_case : str = vis_processors["""eval"""](snake_case__ ).unsqueeze(0 ).to(snake_case__ ) _snake_case : List[Any] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(snake_case__ ) # create processor _snake_case : Any = BlipImageProcessor( size={"""height""": image_size, """width""": image_size} , image_mean=snake_case__ , image_std=snake_case__ ) _snake_case : int = BlipaProcessor(image_processor=snake_case__ , tokenizer=snake_case__ ) _snake_case : Any = processor(images=snake_case__ , return_tensors="""pt""" ).pixel_values.to(snake_case__ ) # make sure processor creates exact same pixel values assert torch.allclose(snake_case__ , snake_case__ ) original_model.to(snake_case__ ) hf_model.to(snake_case__ ) with torch.no_grad(): if "opt" in model_name: _snake_case : str = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits _snake_case : int = hf_model(snake_case__ , snake_case__ ).logits else: _snake_case : str = original_model( {"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits _snake_case : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 ) _snake_case : Union[str, Any] = hf_model(snake_case__ , snake_case__ , labels=snake_case__ ).logits assert original_logits.shape == logits.shape print("""First values of original logits:""" , original_logits[0, :3, :3] ) print("""First values of HF logits:""" , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": _snake_case : List[str] = torch.tensor( [[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=snake_case__ ) assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 ) elif model_name == "blip2-flan-t5-xl-coco": _snake_case : Union[str, Any] = torch.tensor( [[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=snake_case__ ) else: # cast to same type _snake_case : int = logits.dtype assert torch.allclose(original_logits.to(snake_case__ ) , snake_case__ , atol=1e-2 ) print("""Looks ok!""" ) print("""Generating a caption...""" ) _snake_case : Any = """""" _snake_case : str = tokenizer(snake_case__ , return_tensors="""pt""" ).input_ids.to(snake_case__ ) _snake_case : Union[str, Any] = original_model.generate({"""image""": original_pixel_values} ) _snake_case : Tuple = hf_model.generate( snake_case__ , snake_case__ , do_sample=snake_case__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print("""Original generation:""" , snake_case__ ) _snake_case : Optional[Any] = input_ids.shape[1] _snake_case : int = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=snake_case__ ) _snake_case : Optional[Any] = 
[text.strip() for text in output_text] print("""HF generation:""" , snake_case__ ) if pytorch_dump_folder_path is not None: processor.save_pretrained(snake_case__ ) hf_model.save_pretrained(snake_case__ ) if push_to_hub: processor.push_to_hub(F"nielsr/{model_name}" ) hf_model.push_to_hub(F"nielsr/{model_name}" ) if __name__ == "__main__": A_ = argparse.ArgumentParser() A_ = [ '''blip2-opt-2.7b''', '''blip2-opt-6.7b''', '''blip2-opt-2.7b-coco''', '''blip2-opt-6.7b-coco''', '''blip2-flan-t5-xl''', '''blip2-flan-t5-xl-coco''', '''blip2-flan-t5-xxl''', ] parser.add_argument( '''--model_name''', default='''blip2-opt-2.7b''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) A_ = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
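The q/v bias handling in the conversion above relies on one trick: the BLIP-2 checkpoint stores only query and value biases, while the fused qkv projection expects a single bias vector, so the key slot is zero-filled. A self-contained sketch with an illustrative hidden size:

import torch

hidden_size = 8  # illustrative; the real model uses the vision encoder's hidden size
q_bias = torch.randn(hidden_size)
v_bias = torch.randn(hidden_size)
# same concatenation the conversion script performs per attention block
qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
assert qkv_bias.shape == (3 * hidden_size,)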
"""simple docstring""" from __future__ import annotations from collections import deque from collections.abc import Sequence from dataclasses import dataclass from typing import Any @dataclass class lowercase: '''simple docstring''' lowercase__ = 42 lowercase__ = None lowercase__ = None def UpperCAmelCase__ (): """simple docstring""" _snake_case : Optional[Any] = Node(1 ) _snake_case : Dict = Node(2 ) _snake_case : Dict = Node(3 ) _snake_case : Dict = Node(4 ) _snake_case : Union[str, Any] = Node(5 ) return tree def UpperCAmelCase__ (snake_case__ : Node | None ): """simple docstring""" return [root.data, *preorder(root.left ), *preorder(root.right )] if root else [] def UpperCAmelCase__ (snake_case__ : Node | None ): """simple docstring""" return postorder(root.left ) + postorder(root.right ) + [root.data] if root else [] def UpperCAmelCase__ (snake_case__ : Node | None ): """simple docstring""" return [*inorder(root.left ), root.data, *inorder(root.right )] if root else [] def UpperCAmelCase__ (snake_case__ : Node | None ): """simple docstring""" return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0 def UpperCAmelCase__ (snake_case__ : Node | None ): """simple docstring""" _snake_case : list[Any] = [] if root is None: return output _snake_case : Union[str, Any] = deque([root] ) while process_queue: _snake_case : str = process_queue.popleft() output.append(node.data ) if node.left: process_queue.append(node.left ) if node.right: process_queue.append(node.right ) return output def UpperCAmelCase__ (snake_case__ : Node | None , snake_case__ : int ): """simple docstring""" _snake_case : list[Any] = [] def populate_output(snake_case__ : Node | None , snake_case__ : int ) -> None: if not root: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.left , level - 1 ) populate_output(root.right , level - 1 ) populate_output(snake_case__ , snake_case__ ) return output def UpperCAmelCase__ (snake_case__ : Node | None , snake_case__ : int ): """simple docstring""" _snake_case : list[Any] = [] def populate_output(snake_case__ : Node | None , snake_case__ : int ) -> None: if root is None: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.right , level - 1 ) populate_output(root.left , level - 1 ) populate_output(snake_case__ , snake_case__ ) return output def UpperCAmelCase__ (snake_case__ : Node | None ): """simple docstring""" if root is None: return [] _snake_case : list[Sequence[Node | None]] = [] _snake_case : List[Any] = 0 _snake_case : Optional[int] = height(snake_case__ ) for h in range(1 , height_tree + 1 ): if not flag: output.append(get_nodes_from_left_to_right(snake_case__ , snake_case__ ) ) _snake_case : Tuple = 1 else: output.append(get_nodes_from_right_to_left(snake_case__ , snake_case__ ) ) _snake_case : Tuple = 0 return output def UpperCAmelCase__ (): # Main function for testing. 
"""simple docstring""" _snake_case : Optional[int] = make_tree() print(F"In-order Traversal: {inorder(snake_case__ )}" ) print(F"Pre-order Traversal: {preorder(snake_case__ )}" ) print(F"Post-order Traversal: {postorder(snake_case__ )}" , """\n""" ) print(F"Height of Tree: {height(snake_case__ )}" , """\n""" ) print("""Complete Level Order Traversal: """ ) print(level_order(snake_case__ ) , """\n""" ) print("""Level-wise order Traversal: """ ) for level in range(1 , height(snake_case__ ) + 1 ): print(F"Level {level}:" , get_nodes_from_left_to_right(snake_case__ , level=snake_case__ ) ) print("""\nZigZag order Traversal: """ ) print(zigzag(snake_case__ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
"""simple docstring""" import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() A_ = logging.get_logger(__name__) def UpperCAmelCase__ (snake_case__ : Optional[int] ): """simple docstring""" print("""Loading config file...""" ) def flatten_yaml_as_dict(snake_case__ : List[Any] , snake_case__ : Optional[Any]="" , snake_case__ : Tuple="." ): _snake_case : Union[str, Any] = [] for k, v in d.items(): _snake_case : List[str] = parent_key + sep + k if parent_key else k if isinstance(snake_case__ , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(snake_case__ , snake_case__ , sep=snake_case__ ).items() ) else: items.append((new_key, v) ) return dict(snake_case__ ) _snake_case : Dict = argparse.Namespace() with open(snake_case__ , """r""" ) as yaml_file: try: _snake_case : List[Any] = yaml.load(snake_case__ , Loader=yaml.FullLoader ) _snake_case : Any = flatten_yaml_as_dict(snake_case__ ) for k, v in flat_cfg.items(): setattr(snake_case__ , snake_case__ , snake_case__ ) except yaml.YAMLError as exc: logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case__ , str(snake_case__ ) ) ) return config def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ): """simple docstring""" _snake_case : Dict = MobileViTVaConfig() _snake_case : Optional[int] = False # dataset if task_name.startswith("""imagenet1k_""" ): _snake_case : Dict = 10_00 if int(task_name.strip().split("""_""" )[-1] ) == 3_84: _snake_case : Union[str, Any] = 3_84 else: _snake_case : Optional[Any] = 2_56 _snake_case : str = """imagenet-1k-id2label.json""" elif task_name.startswith("""imagenet21k_to_1k_""" ): _snake_case : str = 2_10_00 if int(task_name.strip().split("""_""" )[-1] ) == 3_84: _snake_case : Dict = 3_84 else: _snake_case : Union[str, Any] = 2_56 _snake_case : Tuple = """imagenet-22k-id2label.json""" elif task_name.startswith("""ade20k_""" ): _snake_case : Tuple = 1_51 _snake_case : str = 5_12 _snake_case : List[Any] = """ade20k-id2label.json""" _snake_case : Union[str, Any] = True elif task_name.startswith("""voc_""" ): _snake_case : List[Any] = 21 _snake_case : List[str] = 5_12 _snake_case : int = """pascal-voc-id2label.json""" _snake_case : int = True # orig_config _snake_case : int = load_orig_config_file(snake_case__ ) assert getattr(snake_case__ , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model" _snake_case : str = getattr(snake_case__ , """model.classification.mitv2.width_multiplier""" , 1.0 ) assert ( getattr(snake_case__ , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" _snake_case : int = getattr(snake_case__ , """model.classification.activation.name""" , """swish""" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: _snake_case : Tuple = getattr(snake_case__ , """model.segmentation.output_stride""" , 16 ) if "_deeplabv3" in task_name: _snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] ) _snake_case : Tuple = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 ) _snake_case : Any = 
getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 ) # id2label _snake_case : Union[str, Any] = """huggingface/label-files""" _snake_case : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) ) _snake_case : List[Any] = {int(snake_case__ ): v for k, v in idalabel.items()} _snake_case : Tuple = idalabel _snake_case : Any = {v: k for k, v in idalabel.items()} return config def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[Any] ): """simple docstring""" _snake_case : List[str] = dct.pop(snake_case__ ) _snake_case : List[Any] = val def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : int=False ): """simple docstring""" if base_model: _snake_case : Any = """""" else: _snake_case : Union[str, Any] = """mobilevitv2.""" _snake_case : Dict = [] for k in state_dict.keys(): if k[:8] == "encoder.": _snake_case : List[str] = k[8:] else: _snake_case : str = k if ".block." in k: _snake_case : Optional[int] = k_new.replace(""".block.""" , """.""" ) if ".conv." in k: _snake_case : Union[str, Any] = k_new.replace(""".conv.""" , """.convolution.""" ) if ".norm." in k: _snake_case : str = k_new.replace(""".norm.""" , """.normalization.""" ) if "conv_1." in k: _snake_case : int = k_new.replace("""conv_1.""" , F"{model_prefix}conv_stem." ) for i in [1, 2]: if F"layer_{i}." in k: _snake_case : Tuple = k_new.replace(F"layer_{i}." , F"{model_prefix}encoder.layer.{i-1}.layer." ) if ".exp_1x1." in k: _snake_case : Optional[Any] = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" ) if ".red_1x1." in k: _snake_case : Optional[Any] = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" ) for i in [3, 4, 5]: if F"layer_{i}.0." in k: _snake_case : Tuple = k_new.replace(F"layer_{i}.0." , F"{model_prefix}encoder.layer.{i-1}.downsampling_layer." ) if F"layer_{i}.1.local_rep.0." in k: _snake_case : Any = k_new.replace(F"layer_{i}.1.local_rep.0." , F"{model_prefix}encoder.layer.{i-1}.conv_kxk." ) if F"layer_{i}.1.local_rep.1." in k: _snake_case : str = k_new.replace(F"layer_{i}.1.local_rep.1." , F"{model_prefix}encoder.layer.{i-1}.conv_1x1." ) for i in [3, 4, 5]: if i == 3: _snake_case : Optional[Any] = [0, 1] elif i == 4: _snake_case : Any = [0, 1, 2, 3] elif i == 5: _snake_case : List[Any] = [0, 1, 2] for j in j_in: if F"layer_{i}.1.global_rep.{j}." in k: _snake_case : Any = k_new.replace( F"layer_{i}.1.global_rep.{j}." , F"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." ) if F"layer_{i}.1.global_rep.{j+1}." in k: _snake_case : List[Any] = k_new.replace( F"layer_{i}.1.global_rep.{j+1}." , F"{model_prefix}encoder.layer.{i-1}.layernorm." ) if F"layer_{i}.1.conv_proj." in k: _snake_case : Union[str, Any] = k_new.replace(F"layer_{i}.1.conv_proj." , F"{model_prefix}encoder.layer.{i-1}.conv_projection." ) if "pre_norm_attn.0." in k: _snake_case : List[Any] = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" ) if "pre_norm_attn.1." in k: _snake_case : Optional[int] = k_new.replace("""pre_norm_attn.1.""" , """attention.""" ) if "pre_norm_ffn.0." in k: _snake_case : List[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" ) if "pre_norm_ffn.1." in k: _snake_case : Tuple = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" ) if "pre_norm_ffn.3." in k: _snake_case : Any = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" ) if "classifier.1." in k: _snake_case : List[str] = k_new.replace("""classifier.1.""" , """classifier.""" ) if "seg_head." 
in k: _snake_case : str = k_new.replace("""seg_head.""" , """segmentation_head.""" ) if ".aspp_layer." in k: _snake_case : Optional[int] = k_new.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in k: _snake_case : int = k_new.replace(""".aspp_pool.""" , """.""" ) rename_keys.append((k, k_new) ) return rename_keys def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" _snake_case : List[str] = [] for k in state_dict.keys(): if k.startswith("""seg_head.aux_head.""" ): keys_to_ignore.append(snake_case__ ) for k in keys_to_ignore: state_dict.pop(snake_case__ , snake_case__ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg""" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" _snake_case : Any = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) return im @torch.no_grad() def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Tuple ): """simple docstring""" _snake_case : int = get_mobilevitva_config(snake_case__ , snake_case__ ) # load original state_dict _snake_case : Optional[int] = torch.load(snake_case__ , map_location="""cpu""" ) # load huggingface model if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ): _snake_case : Any = MobileViTVaForSemanticSegmentation(snake_case__ ).eval() _snake_case : List[Any] = False else: _snake_case : List[Any] = MobileViTVaForImageClassification(snake_case__ ).eval() _snake_case : Optional[Any] = False # remove and rename some keys of load the original model _snake_case : Union[str, Any] = checkpoint remove_unused_keys(snake_case__ ) _snake_case : List[str] = create_rename_keys(snake_case__ , base_model=snake_case__ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) # load modified state_dict model.load_state_dict(snake_case__ ) # Check outputs on an image, prepared by MobileViTImageProcessor _snake_case : Optional[int] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) _snake_case : List[str] = image_processor(images=prepare_img() , return_tensors="""pt""" ) _snake_case : Optional[Any] = model(**snake_case__ ) # verify classification model if task_name.startswith("""imagenet""" ): _snake_case : List[str] = outputs.logits _snake_case : Any = logits.argmax(-1 ).item() print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] ) if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0: # expected_logits for base variant _snake_case : List[str] = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] ) assert torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) print(F"Saving model {task_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(snake_case__ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(snake_case__ ) if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''imagenet1k_256''', type=str, help=( '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . 
''' ''' Classification (ImageNet-1k) - MobileViTV2 (256x256) : imagenet1k_256 - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384 - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : imagenet21k_to_1k_256 - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on ImageNet-1k 384x384) : imagenet21k_to_1k_384 Segmentation - ADE20K Dataset : ade20k_deeplabv3 - Pascal VOC 2012 Dataset: voc_deeplabv3 ''' ), choices=[ '''imagenet1k_256''', '''imagenet1k_384''', '''imagenet21k_to_1k_256''', '''imagenet21k_to_1k_384''', '''ade20k_deeplabv3''', '''voc_deeplabv3''', ], ) parser.add_argument( '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) A_ = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
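The whole conversion above is driven by (old_key, new_key) pairs fed to rename_key, which just pops and re-inserts a dict entry. A minimal illustration on a plain dict (the key names are illustrative):

state_dict = {"conv_1.block.weight": 0.0}
old_key, new_key = "conv_1.block.weight", "mobilevitv2.conv_stem.convolution.weight"
state_dict[new_key] = state_dict.pop(old_key)  # the move rename_key performs
assert new_key in state_dict and old_key not in state_dict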
"""simple docstring""" import inspect import unittest class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' try: import diffusers # noqa: F401 except ImportError: assert False def UpperCamelCase_ ( self: int ): '''simple docstring''' import diffusers from diffusers.dependency_versions_table import deps _snake_case : List[str] = inspect.getmembers(a_, inspect.isclass ) for cls_name, cls_module in all_classes: if "dummy_" in cls_module.__module__: for backend in cls_module._backends: if backend == "k_diffusion": _snake_case : Optional[Any] = """k-diffusion""" elif backend == "invisible_watermark": _snake_case : Tuple = """invisible-watermark""" assert backend in deps, f"{backend} is not in the deps table!"
"""simple docstring""" import os import sys import unittest A_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path A_ = os.path.join(git_repo_path, '''src''', '''diffusers''') class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Optional[int] = find_backend(""" if not is_torch_available():""" ) self.assertEqual(a_, """torch""" ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") _snake_case : Any = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" ) self.assertEqual(a_, """torch_and_transformers""" ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") _snake_case : Union[str, Any] = find_backend( """ if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" ) self.assertEqual(a_, """torch_and_transformers_and_onnx""" ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Dict = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("""torch""", a_ ) self.assertIn("""torch_and_transformers""", a_ ) self.assertIn("""flax_and_transformers""", a_ ) self.assertIn("""torch_and_transformers_and_onnx""", a_ ) # Likewise, we can't assert on the exact content of a key self.assertIn("""UNet2DModel""", objects["""torch"""] ) self.assertIn("""FlaxUNet2DConditionModel""", objects["""flax"""] ) self.assertIn("""StableDiffusionPipeline""", objects["""torch_and_transformers"""] ) self.assertIn("""FlaxStableDiffusionPipeline""", objects["""flax_and_transformers"""] ) self.assertIn("""LMSDiscreteScheduler""", objects["""torch_and_scipy"""] ) self.assertIn("""OnnxStableDiffusionPipeline""", objects["""torch_and_transformers_and_onnx"""] ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[Any] = create_dummy_object("""CONSTANT""", """'torch'""" ) self.assertEqual(a_, """\nCONSTANT = None\n""" ) _snake_case : Optional[int] = create_dummy_object("""function""", """'torch'""" ) self.assertEqual( a_, """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" ) _snake_case : List[Any] = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, 'torch') @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, 'torch') """ _snake_case : Union[str, Any] = create_dummy_object("""FakeClass""", """'torch'""" ) self.assertEqual(a_, a_ ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Union[str, Any] = """# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, [\"torch\"]) class FakeClass(metaclass=DummyObject): _backends = [\"torch\"] def __init__(self, *args, **kwargs): requires_backends(self, [\"torch\"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) """ _snake_case : List[Any] = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} ) self.assertEqual(dummy_files["""torch"""], a_ )
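For readers without the repository checked out, here is a hedged re-implementation of what the find_backend utility tested above appears to do, judging by the assertions: pull the `is_*_available` names out of an init guard and join them with "_and_". This sketch is illustrative, not the utility's actual code:

import re

def find_backend_sketch(line):
    matches = re.findall(r"is_(\w+)_available\(\)", line)
    return "_and_".join(matches) if matches else None

assert find_backend_sketch(" if not is_torch_available():") == "torch"
assert (
    find_backend_sketch(" if not (is_torch_available() and is_transformers_available()):")
    == "torch_and_transformers"
)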
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str ): """simple docstring""" def get_matched_characters(snake_case__ : str , snake_case__ : str ) -> str: _snake_case : str = [] _snake_case : Any = min(len(_stra ) , len(_stra ) ) // 2 for i, l in enumerate(_stra ): _snake_case : Optional[int] = int(max(0 , i - limit ) ) _snake_case : Any = int(min(i + limit + 1 , len(_stra ) ) ) if l in _stra[left:right]: matched.append(snake_case__ ) _snake_case : List[str] = F"{_stra[0:_stra.index(snake_case__ )]} {_stra[_stra.index(snake_case__ ) + 1:]}" return "".join(snake_case__ ) # matching characters _snake_case : Optional[Any] = get_matched_characters(snake_case__ , snake_case__ ) _snake_case : Optional[Any] = get_matched_characters(snake_case__ , snake_case__ ) _snake_case : int = len(snake_case__ ) # transposition _snake_case : List[str] = ( len([(ca, ca) for ca, ca in zip(snake_case__ , snake_case__ ) if ca != ca] ) // 2 ) if not match_count: _snake_case : Tuple = 0.0 else: _snake_case : str = ( 1 / 3 * ( match_count / len(snake_case__ ) + match_count / len(snake_case__ ) + (match_count - transpositions) / match_count ) ) # common prefix up to 4 characters _snake_case : int = 0 for ca, ca in zip(stra[:4] , stra[:4] ): if ca == ca: prefix_len += 1 else: break return jaro + 0.1 * prefix_len * (1 - jaro) if __name__ == "__main__": import doctest doctest.testmod() print(jaro_winkler('''hello''', '''world'''))
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) A_ = { '''configuration_owlvit''': [ '''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OwlViTConfig''', '''OwlViTOnnxConfig''', '''OwlViTTextConfig''', '''OwlViTVisionConfig''', ], '''processing_owlvit''': ['''OwlViTProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = ['''OwlViTFeatureExtractor'''] A_ = ['''OwlViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OwlViTModel''', '''OwlViTPreTrainedModel''', '''OwlViTTextModel''', '''OwlViTVisionModel''', '''OwlViTForObjectDetection''', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A_ = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OPTForCausalLM''', '''OPTModel''', '''OPTPreTrainedModel''', '''OPTForSequenceClassification''', '''OPTForQuestionAnswering''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''FlaxOPTForCausalLM''', '''FlaxOPTModel''', '''FlaxOPTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml A_ = logging.get_logger(__name__) def UpperCAmelCase__ (snake_case__ : bool , snake_case__ : bool ): """simple docstring""" def run_func(snake_case__ : Tuple ): @wraps(snake_case__ ) def run_in_eager_mode(*snake_case__ : str , **snake_case__ : Any ): return func(*snake_case__ , **snake_case__ ) @wraps(snake_case__ ) @tf.function(experimental_compile=snake_case__ ) def run_in_graph_mode(*snake_case__ : Any , **snake_case__ : Optional[int] ): return func(*snake_case__ , **snake_case__ ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int , snake_case__ : int ): """simple docstring""" _snake_case : List[str] = random.Random() _snake_case : Optional[int] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(snake_case__ , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class lowercase( __a ): '''simple docstring''' lowercase__ = 42 lowercase__ = 42 lowercase__ = "TensorFlow" @property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' return tf.__version__ def UpperCamelCase_ ( self: List[str], a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : List[str] = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : Optional[int] = self._prepare_inference_func(a_, a_, a_ ) return self._measure_speed(_inference ) def UpperCamelCase_ ( self: int, a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : Tuple = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : Optional[Any] = self._prepare_train_func(a_, a_, a_ ) return self._measure_speed(_train ) def UpperCamelCase_ ( self: Dict, a_: str, a_: int, a_: int ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ ) _snake_case : str = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : List[str] = self._prepare_inference_func(a_, a_, a_ ) return self._measure_memory(_inference ) def UpperCamelCase_ ( self: Tuple, a_: str, a_: int, a_: int ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ ) _snake_case : Dict = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : Optional[int] = self._prepare_train_func(a_, 
a_, a_ ) return self._measure_memory(_train ) def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : List[Any] = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _snake_case : List[Any] = ( hasattr(a_, """architectures""" ) and isinstance(config.architectures, a_ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _snake_case : str = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _snake_case : List[Any] = __import__("""transformers""", fromlist=[model_class] ) _snake_case : Dict = getattr(a_, a_ ) _snake_case : Any = model_cls(a_ ) except ImportError: raise ImportError( f"{model_class} does not exist. If you just want to test the pretrained model, you might want to" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _snake_case : Any = TF_MODEL_MAPPING[config.__class__](a_ ) # encoder-decoder has vocab size saved differently _snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size _snake_case : List[str] = random_input_ids(a_, a_, a_ ) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_decoder_forward(): return model(a_, decoder_input_ids=a_, training=a_ ) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_forward(): return model(a_, training=a_ ) _snake_case : Optional[int] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def UpperCamelCase_ ( self: Optional[int], a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : str = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _snake_case : Tuple = ( hasattr(a_, """architectures""" ) and isinstance(config.architectures, a_ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _snake_case : List[str] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _snake_case : str = __import__("""transformers""", fromlist=[model_class] ) _snake_case : Tuple = getattr(a_, a_ ) _snake_case : Any = model_cls(a_ ) except ImportError: raise ImportError( f"{model_class} does not exist. 
If you just want to test the pretrained model, you might want to" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _snake_case : Optional[Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](a_ ) # encoder-decoder has vocab size saved differently _snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size _snake_case : int = random_input_ids(a_, a_, a_ ) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_decoder_train(): _snake_case : Dict = model(a_, decoder_input_ids=a_, labels=a_, training=a_ )[0] _snake_case : str = tf.gradients(a_, model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_train(): _snake_case : Optional[Any] = model(a_, labels=a_, training=a_ )[0] _snake_case : Optional[Any] = tf.gradients(a_, model.trainable_variables ) return gradients _snake_case : int = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def UpperCamelCase_ ( self: Union[str, Any], a_: str ): '''simple docstring''' with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(a_, repeat=1, number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _snake_case : Dict = timeit.repeat( a_, repeat=self.args.repeat, number=10, ) return min(a_ ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f"Doesn't fit on GPU. {e}" ) def UpperCamelCase_ ( self: Optional[Any], a_: Callable[[], None] ): '''simple docstring''' logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) _snake_case : List[Any] = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) _snake_case : Optional[Any] = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() _snake_case : List[str] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) _snake_case : Tuple = nvml.nvmlDeviceGetMemoryInfo(a_ ) _snake_case : List[str] = meminfo.used _snake_case : Any = Memory(a_ ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) _snake_case : List[Any] = None else: _snake_case : int = measure_peak_memory_cpu(a_ ) _snake_case : List[str] = Memory(a_ ) if isinstance(a_, a_ ) else memory_bytes if self.args.trace_memory_line_by_line: _snake_case : Tuple = stop_memory_tracing(a_ ) if memory is None: _snake_case : int = summary.total else: _snake_case : int = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f"Doesn't fit on GPU. {e}" ) return "N/A", None
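if __name__ == "__main__":
    # Standalone sketch (not part of the benchmark class above) of its timing
    # strategy: `timeit.repeat`, then the minimum over repeats divided by the
    # inner `number`, since per the timeit docs the fastest run is the least
    # noisy estimate. The workload below is a dummy stand-in.
    import timeit

    def _dummy_workload():
        return sum(i * i for i in range(10_000))

    runtimes = timeit.repeat(_dummy_workload, repeat=3, number=10)
    print(f"best runtime per call: {min(runtimes) / 10.0:.6f}s")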
"""simple docstring""" import argparse import math import traceback import dateutil.parser as date_parser import requests def UpperCAmelCase__ (snake_case__ : Any ): """simple docstring""" _snake_case : Dict = {} _snake_case : Union[str, Any] = job["""started_at"""] _snake_case : Any = job["""completed_at"""] _snake_case : Optional[int] = date_parser.parse(snake_case__ ) _snake_case : List[str] = date_parser.parse(snake_case__ ) _snake_case : int = round((end_datetime - start_datetime).total_seconds() / 60.0 ) _snake_case : Dict = start _snake_case : Dict = end _snake_case : Any = duration_in_min return job_info def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Tuple=None ): """simple docstring""" _snake_case : Optional[Any] = None if token is not None: _snake_case : List[str] = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"} _snake_case : Tuple = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100" _snake_case : Optional[Any] = requests.get(snake_case__ , headers=snake_case__ ).json() _snake_case : int = {} try: job_time.update({job["""name"""]: extract_time_from_single_job(snake_case__ ) for job in result["""jobs"""]} ) _snake_case : Optional[int] = math.ceil((result["""total_count"""] - 1_00) / 1_00 ) for i in range(snake_case__ ): _snake_case : List[str] = requests.get(url + F"&page={i + 2}" , headers=snake_case__ ).json() job_time.update({job["""name"""]: extract_time_from_single_job(snake_case__ ) for job in result["""jobs"""]} ) return job_time except Exception: print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" ) return {} if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''') A_ = parser.parse_args() A_ = get_job_time(args.workflow_run_id) A_ = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(F'''{k}: {v['duration']}''')
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : str ): """simple docstring""" _snake_case : str = int(snake_case__ ) # Initialize Result _snake_case : str = [] # Traverse through all denomination for denomination in reversed(snake_case__ ): # Find denominations while int(snake_case__ ) >= int(snake_case__ ): total_value -= int(snake_case__ ) answer.append(snake_case__ ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": A_ = [] A_ = '''0''' if ( input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower() == "y" ): A_ = int(input('''Enter the number of denominations you want to add: ''').strip()) for i in range(0, n): denominations.append(int(input(F'''Denomination {i}: ''').strip())) A_ = input('''Enter the change you want to make in Indian Currency: ''').strip() else: # All denominations of Indian Currency if user does not enter A_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00] A_ = input('''Enter the change you want to make: ''').strip() if int(value) == 0 or int(value) < 0: print('''The total value cannot be zero or negative.''') else: print(F'''Following is minimal change for {value}: ''') A_ = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=''' ''')
"""simple docstring""" import inspect import warnings from typing import Any, Dict, Optional, Union from packaging import version def UpperCAmelCase__ (*snake_case__ : List[Any] , snake_case__ : Optional[Union[Dict, Any]] = None , snake_case__ : str=True , snake_case__ : Dict=2 ): """simple docstring""" from .. import __version__ _snake_case : Tuple = take_from _snake_case : Tuple = () if not isinstance(args[0] , snake_case__ ): _snake_case : Optional[int] = (args,) for attribute, version_name, message in args: if version.parse(version.parse(snake_case__ ).base_version ) >= version.parse(snake_case__ ): raise ValueError( F"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'" F" version {__version__} is >= {version_name}" ) _snake_case : Optional[Any] = None if isinstance(snake_case__ , snake_case__ ) and attribute in deprecated_kwargs: values += (deprecated_kwargs.pop(snake_case__ ),) _snake_case : List[Any] = F"The `{attribute}` argument is deprecated and will be removed in version {version_name}." elif hasattr(snake_case__ , snake_case__ ): values += (getattr(snake_case__ , snake_case__ ),) _snake_case : Union[str, Any] = F"The `{attribute}` attribute is deprecated and will be removed in version {version_name}." elif deprecated_kwargs is None: _snake_case : List[Any] = F"`{attribute}` is deprecated and will be removed in version {version_name}." if warning is not None: _snake_case : Any = warning + """ """ if standard_warn else """""" warnings.warn(warning + message , snake_case__ , stacklevel=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) > 0: _snake_case : Dict = inspect.getouterframes(inspect.currentframe() )[1] _snake_case : str = call_frame.filename _snake_case : int = call_frame.lineno _snake_case : Any = call_frame.function _snake_case , _snake_case : List[Any] = next(iter(deprecated_kwargs.items() ) ) raise TypeError(F"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" ) if len(snake_case__ ) == 0: return elif len(snake_case__ ) == 1: return values[0] return values
"""simple docstring""" import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class lowercase: '''simple docstring''' def __init__( self: Optional[Any], a_: Union[str, Any], a_: int=100, a_: int=13, a_: List[Any]=30, a_: str=2, a_: Optional[Any]=3, a_: Optional[int]=True, a_: Any=True, a_: Optional[Any]=32, a_: Tuple=4, a_: str=4, a_: List[Any]=37, a_: List[str]="gelu", a_: str=0.1, a_: Optional[int]=0.1, a_: Any=10, a_: List[str]=0.02, a_: Dict=3, a_: str=None, a_: Optional[int]=[0, 1, 2, 3], ): '''simple docstring''' _snake_case : Optional[int] = parent _snake_case : Optional[Any] = 100 _snake_case : Any = batch_size _snake_case : List[Any] = image_size _snake_case : Optional[Any] = patch_size _snake_case : str = num_channels _snake_case : Tuple = is_training _snake_case : Tuple = use_labels _snake_case : Any = hidden_size _snake_case : Optional[int] = num_hidden_layers _snake_case : List[str] = num_attention_heads _snake_case : Union[str, Any] = intermediate_size _snake_case : Dict = hidden_act _snake_case : str = hidden_dropout_prob _snake_case : Optional[int] = attention_probs_dropout_prob _snake_case : Optional[Any] = type_sequence_label_size _snake_case : Any = initializer_range _snake_case : List[str] = scope _snake_case : int = out_indices _snake_case : Optional[Any] = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _snake_case : Dict = (image_size // patch_size) ** 2 _snake_case : str = num_patches + 1 def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : List[Any] = None _snake_case : Tuple = None if self.use_labels: _snake_case : str = ids_tensor([self.batch_size], self.type_sequence_label_size ) _snake_case : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) _snake_case : List[str] = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return BeitConfig( vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=a_, initializer_range=self.initializer_range, out_indices=self.out_indices, ) def UpperCamelCase_ ( self: 
List[Any], a_: List[Any], a_: Any, a_: Optional[Any], a_: List[str] ): '''simple docstring''' _snake_case : str = BeitModel(config=a_ ) model.to(a_ ) model.eval() _snake_case : Dict = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self: str, a_: List[Any], a_: Optional[Any], a_: Optional[int], a_: List[Any] ): '''simple docstring''' _snake_case : List[str] = BeitForMaskedImageModeling(config=a_ ) model.to(a_ ) model.eval() _snake_case : Union[str, Any] = model(a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) ) def UpperCamelCase_ ( self: Any, a_: List[str], a_: Any, a_: List[Any], a_: Optional[Any] ): '''simple docstring''' _snake_case : Any = self.type_sequence_label_size _snake_case : Any = BeitForImageClassification(a_ ) model.to(a_ ) model.eval() _snake_case : List[Any] = model(a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _snake_case : Any = 1 _snake_case : str = BeitForImageClassification(a_ ) model.to(a_ ) model.eval() _snake_case : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _snake_case : Optional[Any] = model(a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase_ ( self: List[Any], a_: Optional[int], a_: List[Any], a_: str, a_: int ): '''simple docstring''' _snake_case : List[str] = self.num_labels _snake_case : List[Any] = BeitForSemanticSegmentation(a_ ) model.to(a_ ) model.eval() _snake_case : List[str] = model(a_ ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) _snake_case : str = model(a_, labels=a_ ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Tuple = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case , _snake_case : Any = config_and_inputs _snake_case : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) lowercase__ = ( { "feature-extraction": BeitModel, "image-classification": BeitForImageClassification, "image-segmentation": BeitForSemanticSegmentation, } if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Dict = BeitModelTester(self ) _snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 ) def UpperCamelCase_ ( self: str ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""BEiT does not use inputs_embeds""" ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def UpperCamelCase_ ( self: str ): '''simple docstring''' pass def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case , _snake_case : Any = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : List[str] = model_class(a_ ) self.assertIsInstance(model.get_input_embeddings(), (nn.Module) ) _snake_case : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a_, nn.Linear ) ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Any = model_class(a_ ) _snake_case : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : List[Any] = [*signature.parameters.keys()] _snake_case : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1], a_ ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' if not self.model_tester.is_training: return _snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Any = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(a_ ), BeitForMaskedImageModeling]: continue _snake_case : List[Any] = model_class(a_ ) model.to(a_ ) model.train() _snake_case : Dict = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : List[Any] = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return _snake_case : Dict = False _snake_case : Optional[Any] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(a_ ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue _snake_case : Any = model_class(a_ ) model.gradient_checkpointing_enable() model.to(a_ ) model.train() _snake_case : Any = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : int = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : int = _config_zero_init(a_ ) for model_class in self.all_model_classes: _snake_case : Tuple = model_class(config=a_ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems 
not properly initialized", ) @slow def UpperCamelCase_ ( self: int ): '''simple docstring''' for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : Optional[int] = BeitModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowercase( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self: Dict ): '''simple docstring''' return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None @slow def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : str = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(a_ ) _snake_case : Dict = self.default_image_processor _snake_case : Dict = prepare_img() _snake_case : List[str] = image_processor(images=a_, return_tensors="""pt""" ).pixel_values.to(a_ ) # prepare bool_masked_pos _snake_case : Optional[int] = torch.ones((1, 196), dtype=torch.bool ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : int = model(pixel_values=a_, bool_masked_pos=a_ ) _snake_case : Dict = outputs.logits # verify the logits _snake_case : Optional[int] = torch.Size((1, 196, 8_192) ) self.assertEqual(logits.shape, a_ ) _snake_case : Optional[Any] = torch.tensor( [[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(a_ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], a_, atol=1E-2 ) ) @slow def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Dict = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(a_ ) _snake_case : List[Any] = self.default_image_processor _snake_case : Any = prepare_img() _snake_case : Any = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : int = model(**a_ ) _snake_case : Optional[int] = outputs.logits # verify the logits _snake_case : Tuple = torch.Size((1, 1_000) ) self.assertEqual(logits.shape, a_ ) _snake_case : Any = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(a_ ) self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) ) _snake_case : str = 281 self.assertEqual(logits.argmax(-1 ).item(), a_ ) @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : int = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to( a_ ) _snake_case : int = self.default_image_processor _snake_case : Optional[Any] = prepare_img() _snake_case : Union[str, Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Union[str, Any] = model(**a_ ) _snake_case : Dict = outputs.logits # verify the logits _snake_case : Tuple = torch.Size((1, 21_841) ) self.assertEqual(logits.shape, a_ ) _snake_case : Optional[int] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(a_ ) self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) ) _snake_case : List[str] = 2_396 self.assertEqual(logits.argmax(-1 ).item(), a_ ) @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : List[str] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" ) _snake_case : int = model.to(a_ ) _snake_case : List[str] = BeitImageProcessor(do_resize=a_, size=640, 
do_center_crop=a_ ) _snake_case : Optional[int] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" ) _snake_case : Union[str, Any] = Image.open(ds[0]["""file"""] ) _snake_case : List[Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Optional[int] = model(**a_ ) _snake_case : Union[str, Any] = outputs.logits # verify the logits _snake_case : List[str] = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape, a_ ) _snake_case : Optional[int] = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" ) if is_pillow_less_than_a: _snake_case : Any = torch.tensor( [ [[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]], [[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]], [[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]], ], device=a_, ) else: _snake_case : Optional[Any] = torch.tensor( [ [[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]], [[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]], [[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]], ], device=a_, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], a_, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : int = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" ) _snake_case : List[Any] = model.to(a_ ) _snake_case : Tuple = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ ) _snake_case : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" ) _snake_case : str = Image.open(ds[0]["""file"""] ) _snake_case : Tuple = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Optional[int] = model(**a_ ) _snake_case : Union[str, Any] = outputs.logits.detach().cpu() _snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=a_, target_sizes=[(500, 300)] ) _snake_case : Optional[int] = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape, a_ ) _snake_case : List[str] = image_processor.post_process_semantic_segmentation(outputs=a_ ) _snake_case : List[str] = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape, a_ )
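if __name__ == "__main__":
    # Compact, self-contained illustration (made-up numbers) of the
    # verification pattern the integration tests above repeat: check the
    # output shape, then compare a small slice of the logits against
    # hard-coded reference values within a floating-point tolerance, instead
    # of asserting exact equality.
    import torch

    logits = torch.tensor([[1.6881, -0.2787, 0.5901, 0.0123]])  # pretend model output
    expected_slice = torch.tensor([1.6881, -0.2787, 0.5901])
    assert logits.shape == torch.Size((1, 4))
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)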
"""simple docstring""" from dataclasses import dataclass, field from typing import Optional @dataclass class lowercase: '''simple docstring''' lowercase__ = field( default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be trained."} ) lowercase__ = field( default="./" , metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} ) lowercase__ = field( default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path of training dataset."} ) lowercase__ = field( default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} ) lowercase__ = field(default=2 , metadata={"help": "Batch size for training."} ) lowercase__ = field(default=2 , metadata={"help": "Batch size for evaluation."} ) lowercase__ = field(default=0.1 , metadata={"help": "Value of weight decay."} ) lowercase__ = field( default=1_00_00 , metadata={"help": "Size of buffer used to shuffle streaming dataset."} ) lowercase__ = field(default=2e-4 , metadata={"help": "Learning rate fo training."} ) lowercase__ = field(default="cosine" , metadata={"help": "Learning rate."} ) lowercase__ = field( default=7_50 , metadata={"help": "Number of warmup steps in the learning rate schedule."} ) lowercase__ = field( default=16 , metadata={"help": "Number of gradient accumulation steps."} ) lowercase__ = field( default=__a , metadata={"help": "Use gradient checkpointing to reduce memory footprint."} ) lowercase__ = field(default=5_00_00 , metadata={"help": "Maximum number of training steps."} ) lowercase__ = field( default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} ) lowercase__ = field(default=10_24 , metadata={"help": "Sequence lengths used for training."} ) lowercase__ = field(default=1 , metadata={"help": "Training seed."} ) lowercase__ = field( default=10_24 , metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."} , ) lowercase__ = field( default=__a , metadata={"help": "States path if the training should continue from a checkpoint folder."} ) lowercase__ = field(default=__a , metadata={"help": "If True the data is pretokenized."} ) @dataclass class lowercase: '''simple docstring''' lowercase__ = field( default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} ) lowercase__ = field( default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} ) lowercase__ = field(default=2 , metadata={"help": "Batch size used for evaluation."} ) lowercase__ = field( default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} ) lowercase__ = field(default=10_24 , metadata={"help": "Length of sequences to be evaluated."} ) lowercase__ = field(default=1 , metadata={"help": "Random seed used for evaluation."} ) @dataclass class lowercase: '''simple docstring''' lowercase__ = field( default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} ) lowercase__ = field(default=__a , metadata={"help": "Number of workers used for code evaluation."} ) lowercase__ = field( default=__a , metadata={"help": "The number of human-eval tasks to run. 
If not included all tasks are evaluated."} , ) lowercase__ = field( default=__a , metadata={"help": "Sample from the language model's output distribution."} ) lowercase__ = field(default=0.2 , metadata={"help": "Sampling temperature used for generation."} ) lowercase__ = field(default=2_56 , metadata={"help": "Maximum number of newly generated tokens."} ) lowercase__ = field(default=0 , metadata={"help": "Top-k parameter used for generation."} ) lowercase__ = field(default=0.95 , metadata={"help": "Top-p parameter used for nucleus sampling."} ) lowercase__ = field(default=10 , metadata={"help": "Number of generations to run in parallel."} ) lowercase__ = field( default=2_00 , metadata={"help": "Number of completions to generate for each sample."} ) lowercase__ = field(default=1 , metadata={"help": "Random seed used for evaluation."} ) lowercase__ = field( default="eval_results.json" , metadata={"help": "Random seed used for evaluation."} ) lowercase__ = field( default="0" , metadata={"help": "Allow `code_eval` to execute Python code on machine"} ) lowercase__ = field( default=-1 , metadata={ "help": ( "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive" " number corresponds to which GPU device id to run on." ) } , ) @dataclass class lowercase: '''simple docstring''' lowercase__ = field( default=__a , metadata={ "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available." } , ) lowercase__ = field( default="transformersbook/codeparrot" , metadata={"help": "Folder or name of dataset to process."} ) lowercase__ = field( default="codeparrot-clean" , metadata={"help": "Folder to save processed processed dataset."} ) lowercase__ = field( default=10_00_00 , metadata={"help": "Number of files to save per JSON output file."} ) lowercase__ = field(default="content" , metadata={"help": "Column containing text data to process."} ) lowercase__ = field( default=10_00 , metadata={"help": "Maximum line length in file, otherwise file is filtered."} ) lowercase__ = field( default=1_00 , metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} ) lowercase__ = field( default=0.25 , metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} ) lowercase__ = field( default=1.5 , metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} ) lowercase__ = field( default=0.7 , metadata={"help": "Probability for filtering config, test and uncommon files."} ) lowercase__ = field( default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} , ) lowercase__ = field( default=__a , metadata={"help": "If True, near-duplicate samples are removed."} ) lowercase__ = field( default=0.85 , metadata={"help": "Jaccard threshold for near-duplicate samples."} ) @dataclass class lowercase: '''simple docstring''' lowercase__ = field( default="gpt2" , metadata={"help": "Base tokenizer to build new tokenizer from."} ) lowercase__ = field( default="transformersbook/codeparrot-train" , metadata={"help": "Dataset to train tokenizer on."} ) lowercase__ = field(default="content" , metadata={"help": "Column containing text data to process."} ) lowercase__ = field(default=20_00_00 , metadata={"help": "Number of examples to train tokenizer on."} ) lowercase__ = field( default=3_27_68 , metadata={"help": "Number of examples to train the tokenizer on."} ) lowercase__ = field(default="codeparrot" , metadata={"help": "Name of new 
tokenizer."} ) lowercase__ = field(default=__a , metadata={"help": "Push saved tokenizer to the hub."} ) @dataclass class lowercase: '''simple docstring''' lowercase__ = field( default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} ) lowercase__ = field( default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path to the dataset to pretokenize."} ) lowercase__ = field( default="tokenized-codeparrot-train" , metadata={"help": "Repo name of the pretokenized data."} ) lowercase__ = field(default=__a , metadata={"help": "Number of workers used for code evaluation."} ) @dataclass class lowercase: '''simple docstring''' lowercase__ = field( default="gpt2-large" , metadata={"help": "Configuration to use for model initialization."} ) lowercase__ = field( default="codeparrot/codeparrot" , metadata={"help": "Tokenizer attached to model."} ) lowercase__ = field(default="codeparrot" , metadata={"help": "Name of the created model."} ) lowercase__ = field(default=__a , metadata={"help": "Push saved tokenizer to the hub."} )
"""simple docstring""" import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class lowercase( __a ): '''simple docstring''' lowercase__ = (IPNDMScheduler,) lowercase__ = (("num_inference_steps", 50),) def UpperCamelCase_ ( self: Union[str, Any], **a_: Union[str, Any] ): '''simple docstring''' _snake_case : List[Any] = {"""num_train_timesteps""": 1_000} config.update(**a_ ) return config def UpperCamelCase_ ( self: Tuple, a_: Optional[int]=0, **a_: int ): '''simple docstring''' _snake_case : Optional[int] = dict(self.forward_default_kwargs ) _snake_case : Optional[Any] = kwargs.pop("""num_inference_steps""", a_ ) _snake_case : Optional[Any] = self.dummy_sample _snake_case : Dict = 0.1 * sample _snake_case : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: _snake_case : int = self.get_scheduler_config(**a_ ) _snake_case : Dict = scheduler_class(**a_ ) scheduler.set_timesteps(a_ ) # copy over dummy past residuals _snake_case : int = dummy_past_residuals[:] if time_step is None: _snake_case : Union[str, Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a_ ) _snake_case : Tuple = scheduler_class.from_pretrained(a_ ) new_scheduler.set_timesteps(a_ ) # copy over dummy past residuals _snake_case : Optional[Any] = dummy_past_residuals[:] _snake_case : List[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : str = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _snake_case : Optional[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : Optional[int] = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' pass def UpperCamelCase_ ( self: str, a_: Any=0, **a_: Tuple ): '''simple docstring''' _snake_case : str = dict(self.forward_default_kwargs ) _snake_case : List[Any] = kwargs.pop("""num_inference_steps""", a_ ) _snake_case : Optional[int] = self.dummy_sample _snake_case : Tuple = 0.1 * sample _snake_case : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: _snake_case : Any = self.get_scheduler_config() _snake_case : Tuple = scheduler_class(**a_ ) scheduler.set_timesteps(a_ ) # copy over dummy past residuals (must be after setting timesteps) _snake_case : Union[str, Any] = dummy_past_residuals[:] if time_step is None: _snake_case : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a_ ) _snake_case : List[str] = scheduler_class.from_pretrained(a_ ) # copy over dummy past residuals new_scheduler.set_timesteps(a_ ) # copy over dummy past residual (must be after setting timesteps) _snake_case : List[str] = dummy_past_residuals[:] _snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : Any = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : int = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, 
"Scheduler outputs are not identical" def UpperCamelCase_ ( self: List[Any], **a_: Optional[int] ): '''simple docstring''' _snake_case : Union[str, Any] = self.scheduler_classes[0] _snake_case : Any = self.get_scheduler_config(**a_ ) _snake_case : List[Any] = scheduler_class(**a_ ) _snake_case : Union[str, Any] = 10 _snake_case : Union[str, Any] = self.dummy_model() _snake_case : List[Any] = self.dummy_sample_deter scheduler.set_timesteps(a_ ) for i, t in enumerate(scheduler.timesteps ): _snake_case : Optional[Any] = model(a_, a_ ) _snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample for i, t in enumerate(scheduler.timesteps ): _snake_case : Union[str, Any] = model(a_, a_ ) _snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample return sample def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : str = dict(self.forward_default_kwargs ) _snake_case : int = kwargs.pop("""num_inference_steps""", a_ ) for scheduler_class in self.scheduler_classes: _snake_case : Union[str, Any] = self.get_scheduler_config() _snake_case : Tuple = scheduler_class(**a_ ) _snake_case : Dict = self.dummy_sample _snake_case : List[str] = 0.1 * sample if num_inference_steps is not None and hasattr(a_, """set_timesteps""" ): scheduler.set_timesteps(a_ ) elif num_inference_steps is not None and not hasattr(a_, """set_timesteps""" ): _snake_case : Dict = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _snake_case : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] _snake_case : List[str] = dummy_past_residuals[:] _snake_case : Optional[int] = scheduler.timesteps[5] _snake_case : Optional[Any] = scheduler.timesteps[6] _snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : List[str] = scheduler.step(a_, a_, a_, **a_ ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) _snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' for timesteps in [100, 1_000]: self.check_over_configs(num_train_timesteps=a_, time_step=a_ ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ): self.check_over_forward(num_inference_steps=a_, time_step=a_ ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Optional[int] = self.full_loop() _snake_case : Optional[int] = torch.mean(torch.abs(a_ ) ) assert abs(result_mean.item() - 2_540_529 ) < 10
"""simple docstring""" import numpy as np def UpperCAmelCase__ (snake_case__ : np.array ): """simple docstring""" return 1 / (1 + np.exp(-vector )) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from __future__ import annotations import math def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True A_ = [num for num in range(3, 10_00_01, 2) if not is_prime(num)] def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ): raise ValueError("""n must be an integer""" ) if n <= 0: raise ValueError("""n must be >= 0""" ) _snake_case : Any = [] for num in range(len(snake_case__ ) ): _snake_case : Optional[int] = 0 while 2 * i * i <= odd_composites[num]: _snake_case : Optional[int] = odd_composites[num] - 2 * i * i if is_prime(snake_case__ ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(snake_case__ ) == n: return list_nums return [] def UpperCAmelCase__ (): """simple docstring""" return compute_nums(1 )[0] if __name__ == "__main__": print(F'''{solution() = }''')
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A_ = { '''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''], '''tokenization_roformer''': ['''RoFormerTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = ['''RoFormerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RoFormerForCausalLM''', '''RoFormerForMaskedLM''', '''RoFormerForMultipleChoice''', '''RoFormerForQuestionAnswering''', '''RoFormerForSequenceClassification''', '''RoFormerForTokenClassification''', '''RoFormerLayer''', '''RoFormerModel''', '''RoFormerPreTrainedModel''', '''load_tf_weights_in_roformer''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRoFormerForCausalLM''', '''TFRoFormerForMaskedLM''', '''TFRoFormerForMultipleChoice''', '''TFRoFormerForQuestionAnswering''', '''TFRoFormerForSequenceClassification''', '''TFRoFormerForTokenClassification''', '''TFRoFormerLayer''', '''TFRoFormerModel''', '''TFRoFormerPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FlaxRoFormerForMaskedLM''', '''FlaxRoFormerForMultipleChoice''', '''FlaxRoFormerForQuestionAnswering''', '''FlaxRoFormerForSequenceClassification''', '''FlaxRoFormerForTokenClassification''', '''FlaxRoFormerModel''', '''FlaxRoFormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, 
FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel, ) else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
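# Toy sketch of the lazy-import idea behind `_LazyModule` used above; this
# illustrates the pattern only and is not the real transformers
# implementation. Attribute access triggers the underlying import on first
# use, so importing the package stays cheap even with optional backends.
import importlib
from types import ModuleType


class TinyLazyModule(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")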
"""simple docstring""" import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class lowercase: '''simple docstring''' def __init__( self: List[Any], a_: str = "cpu", a_: str = "openai/clip-vit-large-patch14" ): '''simple docstring''' _snake_case : Optional[int] = device _snake_case : str = CLIPTokenizerFast.from_pretrained(a_ ) _snake_case : Union[str, Any] = [0.48_145_466, 0.4_578_275, 0.40_821_073] _snake_case : Optional[int] = [0.26_862_954, 0.26_130_258, 0.27_577_711] _snake_case : str = torchvision.transforms.Normalize(self.image_mean, self.image_std ) _snake_case : Optional[int] = torchvision.transforms.Resize(224 ) _snake_case : str = torchvision.transforms.CenterCrop(224 ) def UpperCamelCase_ ( self: List[str], a_: str ): '''simple docstring''' _snake_case : Optional[int] = self.resize(a_ ) _snake_case : List[Any] = self.center_crop(a_ ) _snake_case : Optional[Any] = self.normalize(a_ ) return images def __call__( self: Any, a_: Optional[int]=None, a_: str=None, **a_: str ): '''simple docstring''' _snake_case : Optional[int] = self.tokenizer(text=a_, **a_ ) _snake_case : Any = self.preprocess_img(a_ ) _snake_case : Union[str, Any] = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class lowercase( nn.Module ): '''simple docstring''' def __init__( self: List[Any], a_: List[Any]=10, a_: Optional[Any]=0.01, a_: List[str]=None, a_: str=None, a_: Any=None, a_: Tuple=None, a_: List[str]=None, a_: List[str]=None, a_: str=False, a_: List[str]=True, a_: Any="image", a_: Optional[Any]=True, a_: Dict=False, a_: List[str]=False, a_: Optional[int]=False, ): '''simple docstring''' super().__init__() _snake_case : int = None _snake_case : List[str] = device if device else get_device() if vqgan: _snake_case : Any = vqgan else: _snake_case : Optional[Any] = load_vqgan(self.device, conf_path=a_, ckpt_path=a_ ) self.vqgan.eval() if clip: _snake_case : Tuple = clip else: _snake_case : Optional[Any] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" ) self.clip.to(self.device ) _snake_case : List[str] = ProcessorGradientFlow(device=self.device ) _snake_case : Union[str, Any] = iterations _snake_case : Dict = lr _snake_case : Optional[int] = log _snake_case : List[str] = make_grid _snake_case : Union[str, Any] = return_val _snake_case : List[str] = quantize _snake_case : List[str] = self.vqgan.decoder.z_shape def UpperCamelCase_ ( self: Tuple, a_: str=None, a_: Dict=None, a_: Dict=5, a_: Dict=True ): '''simple docstring''' _snake_case : Dict = [] if output_path is None: _snake_case : Tuple = """./animation.gif""" if input_path is None: _snake_case : Any = self.save_path _snake_case : Optional[int] = sorted(glob(input_path + """/*""" ) ) if not len(a_ ): raise ValueError( """No images found in save path, aborting (did you pass save_intermediate=True to the generate""" """ function?)""" ) if len(a_ ) == 1: print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" ) _snake_case : List[Any] = total_duration / len(a_ ) _snake_case : Optional[Any] = [frame_duration] * len(a_ ) if extend_frames: _snake_case : Optional[int] = 1.5 _snake_case : int = 3 for file_name in paths: if file_name.endswith(""".png""" ): 
images.append(imageio.imread(a_ ) ) imageio.mimsave(a_, a_, duration=a_ ) print(f"gif saved to {output_path}" ) def UpperCamelCase_ ( self: str, a_: Tuple=None, a_: Optional[Any]=None ): '''simple docstring''' if not (path or img): raise ValueError("""Input either path or tensor""" ) if img is not None: raise NotImplementedError _snake_case : int = preprocess(Image.open(a_ ), target_image_size=256 ).to(self.device ) _snake_case : int = preprocess_vqgan(a_ ) _snake_case , *_snake_case : List[Any] = self.vqgan.encode(a_ ) return z def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any] ): '''simple docstring''' _snake_case : Optional[int] = self.latent.detach().requires_grad_() _snake_case : Tuple = base_latent + transform_vector if self.quantize: _snake_case , *_snake_case : Any = self.vqgan.quantize(a_ ) else: _snake_case : List[Any] = trans_latent return self.vqgan.decode(a_ ) def UpperCamelCase_ ( self: List[Any], a_: Any, a_: Union[str, Any], a_: Dict=None ): '''simple docstring''' _snake_case : Tuple = self.clip_preprocessor(text=a_, images=a_, return_tensors="""pt""", padding=a_ ) _snake_case : Any = self.clip(**a_ ) _snake_case : str = clip_outputs.logits_per_image if weights is not None: _snake_case : Any = similarity_logits * weights return similarity_logits.sum() def UpperCamelCase_ ( self: Any, a_: Any, a_: List[str], a_: Dict ): '''simple docstring''' _snake_case : List[Any] = self._get_clip_similarity(pos_prompts["""prompts"""], a_, weights=(1 / pos_prompts["""weights"""]) ) if neg_prompts: _snake_case : List[str] = self._get_clip_similarity(neg_prompts["""prompts"""], a_, weights=neg_prompts["""weights"""] ) else: _snake_case : Tuple = torch.tensor([1], device=self.device ) _snake_case : int = -torch.log(a_ ) + torch.log(a_ ) return loss def UpperCamelCase_ ( self: Optional[Any], a_: Tuple, a_: Union[str, Any], a_: List[str] ): '''simple docstring''' _snake_case : Tuple = torch.randn_like(self.latent, requires_grad=a_, device=self.device ) _snake_case : Dict = torch.optim.Adam([vector], lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() _snake_case : str = self._add_vector(a_ ) _snake_case : List[Any] = loop_post_process(a_ ) _snake_case : List[Any] = self._get_CLIP_loss(a_, a_, a_ ) print("""CLIP loss""", a_ ) if self.log: wandb.log({"""CLIP Loss""": clip_loss} ) clip_loss.backward(retain_graph=a_ ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def UpperCamelCase_ ( self: int, a_: Any, a_: Union[str, Any], a_: Optional[int] ): '''simple docstring''' wandb.init(reinit=a_, project="""face-editor""" ) wandb.config.update({"""Positive Prompts""": positive_prompts} ) wandb.config.update({"""Negative Prompts""": negative_prompts} ) wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} ) if image_path: _snake_case : Any = Image.open(a_ ) _snake_case : str = image.resize((256, 256) ) wandb.log("""Original Image""", wandb.Image(a_ ) ) def UpperCamelCase_ ( self: str, a_: List[Any] ): '''simple docstring''' if not prompts: return [] _snake_case : List[str] = [] _snake_case : Tuple = [] if isinstance(a_, a_ ): _snake_case : Union[str, Any] = [prompt.strip() for prompt in prompts.split("""|""" )] for prompt in prompts: if isinstance(a_, (tuple, list) ): _snake_case : List[Any] = prompt[0] _snake_case : Optional[Any] = float(prompt[1] ) elif ":" in prompt: _snake_case , _snake_case : List[Any] = prompt.split(""":""" ) _snake_case : str = float(a_ ) else: _snake_case : int = prompt 
_snake_case : Union[str, Any] = 1.0 processed_prompts.append(a_ ) weights.append(a_ ) return { "prompts": processed_prompts, "weights": torch.tensor(a_, device=self.device ), } def UpperCamelCase_ ( self: Dict, a_: List[Any], a_: List[Any]=None, a_: Optional[Any]=None, a_: Optional[Any]=True, a_: Dict=False, a_: Optional[Any]=True, a_: Optional[Any]=True, a_: Any=None, ): '''simple docstring''' if image_path: _snake_case : Union[str, Any] = self._get_latent(a_ ) else: _snake_case : Any = torch.randn(self.latent_dim, device=self.device ) if self.log: self._init_logging(a_, a_, a_ ) assert pos_prompts, "You must provide at least one positive prompt." _snake_case : str = self.process_prompts(a_ ) _snake_case : Dict = self.process_prompts(a_ ) if save_final and save_path is None: _snake_case : Any = os.path.join("""./outputs/""", """_""".join(pos_prompts["""prompts"""] ) ) if not os.path.exists(a_ ): os.makedirs(a_ ) else: _snake_case : List[Any] = save_path + """_""" + get_timestamp() os.makedirs(a_ ) _snake_case : Optional[Any] = save_path _snake_case : List[Any] = self.vqgan.decode(self.latent )[0] if show_intermediate: print("""Original Image""" ) show_pil(custom_to_pil(a_ ) ) _snake_case : List[Any] = loop_post_process(a_ ) for iter, transformed_img in enumerate(self._optimize_CLIP(a_, a_, a_ ) ): if show_intermediate: show_pil(a_ ) if save_intermediate: transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png" ) ) if self.log: wandb.log({"""Image""": wandb.Image(a_ )} ) if show_final: show_pil(a_ ) if save_final: transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png" ) )
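# Hypothetical driver for the editor class above (its original name is lost to
# the `lowercase` mangling in this dump, and the keyword names below are
# assumptions): load a face image, then optimize the VQGAN latent against the
# CLIP prompts.
#
#   editor = VQGANCLIPEditor(iterations=20, lr=0.05, log=False)
#   editor.generate(
#       "a smiling face:1.0|studio lighting:0.5",  # positive prompts
#       "blurry:1.0",                              # negative prompts
#       image_path="input.png",
#       show_intermediate=True,
#   )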
"""simple docstring""" from collections import deque def UpperCAmelCase__ (snake_case__ : Optional[int] ): """simple docstring""" _snake_case : Dict = len(snake_case__ ) _snake_case : List[Any] = deque() _snake_case : List[str] = [False for _ in range(snake_case__ )] _snake_case : List[str] = [-1 for _ in range(snake_case__ )] _snake_case : Optional[int] = index_of[:] def strong_connect(snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] ): _snake_case : Dict = index # the number when this node is seen _snake_case : Tuple = index # lowest rank node reachable from here index += 1 stack.append(snake_case__ ) _snake_case : Tuple = True for w in g[v]: if index_of[w] == -1: _snake_case : int = strong_connect(snake_case__ , snake_case__ , snake_case__ ) _snake_case : Dict = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) elif on_stack[w]: _snake_case : Dict = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) if lowlink_of[v] == index_of[v]: _snake_case : int = [] _snake_case : List[str] = stack.pop() _snake_case : int = False component.append(snake_case__ ) while w != v: _snake_case : Optional[int] = stack.pop() _snake_case : str = False component.append(snake_case__ ) components.append(snake_case__ ) return index _snake_case : Any = [] for v in range(snake_case__ ): if index_of[v] == -1: strong_connect(snake_case__ , 0 , snake_case__ ) return components def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : Union[str, Any] ): """simple docstring""" _snake_case : Dict = [[] for _ in range(snake_case__ )] for u, v in edges: g[u].append(snake_case__ ) return g if __name__ == "__main__": # Test A_ = 7 A_ = [0, 0, 1, 2, 3, 3, 4, 4, 6] A_ = [1, 3, 2, 0, 1, 4, 5, 6, 5] A_ = [(u, v) for u, v in zip(source, target)] A_ = create_graph(n_vertices, edges) assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ) or number < 0: raise ValueError("""Input must be a non-negative integer""" ) _snake_case : Dict = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A_ = { '''configuration_trajectory_transformer''': [ '''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrajectoryTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TrajectoryTransformerModel''', '''TrajectoryTransformerPreTrainedModel''', '''load_tf_weights_in_trajectory_transformer''', ] if TYPE_CHECKING: from .configuration_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TrajectoryTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel, load_tf_weights_in_trajectory_transformer, ) else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase: '''simple docstring''' def __init__( self: List[str], a_: List[Any], a_: str=13, a_: Dict=32, a_: Union[str, Any]=3, a_: Union[str, Any]=4, a_: Tuple=[10, 20, 30, 40], a_: Dict=[2, 2, 3, 2], a_: Tuple=True, a_: Optional[Any]=True, a_: Any=37, a_: Any="gelu", a_: int=10, a_: Tuple=0.02, a_: str=["stage2", "stage3", "stage4"], a_: List[str]=[2, 3, 4], a_: List[str]=None, ): '''simple docstring''' _snake_case : int = parent _snake_case : int = batch_size _snake_case : List[Any] = image_size _snake_case : List[str] = num_channels _snake_case : Tuple = num_stages _snake_case : Union[str, Any] = hidden_sizes _snake_case : List[Any] = depths _snake_case : Tuple = is_training _snake_case : List[str] = use_labels _snake_case : Tuple = intermediate_size _snake_case : List[str] = hidden_act _snake_case : Optional[Any] = num_labels _snake_case : Tuple = initializer_range _snake_case : Tuple = out_features _snake_case : Tuple = out_indices _snake_case : Dict = scope def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : Any = None if self.use_labels: _snake_case : Dict = ids_tensor([self.batch_size], self.num_labels ) _snake_case : Optional[Any] = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return ConvNextVaConfig( num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=a_, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, ) def UpperCamelCase_ ( self: int, a_: Tuple, a_: Any, a_: Dict ): '''simple docstring''' _snake_case : int = ConvNextVaModel(config=a_ ) model.to(a_ ) model.eval() _snake_case : Any = model(a_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def UpperCamelCase_ ( self: Optional[int], a_: List[str], a_: Tuple, a_: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[int] = ConvNextVaForImageClassification(a_ ) model.to(a_ ) model.eval() _snake_case : Optional[int] = model(a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self: Union[str, Any], a_: Tuple, a_: Tuple, a_: Tuple ): '''simple docstring''' _snake_case : List[str] = 
ConvNextVaBackbone(config=a_ ) model.to(a_ ) model.eval() _snake_case : int = model(a_ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ), len(config.out_features ) ) self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] ) # verify backbone works with out_features=None _snake_case : Tuple = None _snake_case : Tuple = ConvNextVaBackbone(config=a_ ) model.to(a_ ) model.eval() _snake_case : List[Any] = model(a_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ), 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ), 1 ) self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Dict = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case : Any = config_and_inputs _snake_case : str = {"""pixel_values""": pixel_values} return config, inputs_dict def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[Any] = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case : List[str] = config_and_inputs _snake_case : Any = {"""pixel_values""": pixel_values, """labels""": labels} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) lowercase__ = ( {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification} if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Tuple = ConvNextVaModelTester(self ) _snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return @unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" ) def UpperCamelCase_ ( self: int ): '''simple docstring''' pass @unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass @unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" ) def UpperCamelCase_ ( self: str ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: _snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_with_labels() _snake_case : List[Any] = True if model_class.__name__ in [ *get_values(a_ ), *get_values(a_ ), ]: continue _snake_case : Tuple = 
model_class(a_ ) model.to(a_ ) model.train() _snake_case : Optional[Any] = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : Any = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: _snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_with_labels() _snake_case : Any = False _snake_case : List[Any] = True if ( model_class.__name__ in [*get_values(a_ ), *get_values(a_ )] or not model_class.supports_gradient_checkpointing ): continue _snake_case : Dict = model_class(a_ ) model.to(a_ ) model.gradient_checkpointing_enable() model.train() _snake_case : str = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : Optional[int] = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : List[str] = model_class(a_ ) _snake_case : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : int = [*signature.parameters.keys()] _snake_case : Union[str, Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1], a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' def check_hidden_states_output(a_: str, a_: Tuple, a_: Tuple ): _snake_case : Optional[Any] = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): _snake_case : Any = model(**self._prepare_for_class(a_, a_ ) ) _snake_case : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case : Optional[int] = self.model_tester.num_stages self.assertEqual(len(a_ ), expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) _snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Optional[Any] = True check_hidden_states_output(a_, a_, a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case : List[str] = True check_hidden_states_output(a_, a_, a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : str = ConvNextVaModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowercase( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None @slow def 
UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(a_ ) _snake_case : Union[str, Any] = self.default_image_processor _snake_case : List[Any] = prepare_img() _snake_case : Optional[int] = preprocessor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Optional[int] = model(**a_ ) # verify the logits _snake_case : Optional[int] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : Optional[int] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3], a_, atol=1E-4 ) )
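# The magic numbers in the backbone checks above (4x4 feature maps for "stage2",
# a 1x1 final map, image_size // 32 for the last hidden state) follow from
# ConvNeXt's downsampling schedule: a stride-4 stem, then stride-2 downsampling
# between stages. A quick sketch of that geometry:
def stage_resolution(image_size, stage):
    # stage 1 sits right after the stride-4 stem; each later stage halves it
    return image_size // 4 // 2 ** (stage - 1)


assert stage_resolution(32, 2) == 4  # feature maps for "stage2" are 4x4
assert stage_resolution(32, 4) == 1  # last stage matches image_size // 32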
"""simple docstring""" import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging A_ = logging.get_logger(__name__) class lowercase( __a ): '''simple docstring''' def __init__( self: int, a_: Union[List[ControlNetModel], Tuple[ControlNetModel]] ): '''simple docstring''' super().__init__() _snake_case : Tuple = nn.ModuleList(a_ ) def UpperCamelCase_ ( self: Dict, a_: torch.FloatTensor, a_: Union[torch.Tensor, float, int], a_: torch.Tensor, a_: List[torch.tensor], a_: List[float], a_: Optional[torch.Tensor] = None, a_: Optional[torch.Tensor] = None, a_: Optional[torch.Tensor] = None, a_: Optional[Dict[str, Any]] = None, a_: bool = False, a_: bool = True, ): '''simple docstring''' for i, (image, scale, controlnet) in enumerate(zip(a_, a_, self.nets ) ): _snake_case , _snake_case : List[str] = controlnet( a_, a_, a_, a_, a_, a_, a_, a_, a_, a_, a_, ) # merge samples if i == 0: _snake_case , _snake_case : List[Any] = down_samples, mid_sample else: _snake_case : Tuple = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(a_, a_ ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def UpperCamelCase_ ( self: Union[str, Any], a_: Union[str, os.PathLike], a_: bool = True, a_: Callable = None, a_: bool = False, a_: Optional[str] = None, ): '''simple docstring''' _snake_case : List[Any] = 0 _snake_case : Optional[int] = save_directory for controlnet in self.nets: controlnet.save_pretrained( a_, is_main_process=a_, save_function=a_, safe_serialization=a_, variant=a_, ) idx += 1 _snake_case : Tuple = model_path_to_save + f"_{idx}" @classmethod def UpperCamelCase_ ( cls: Any, a_: Optional[Union[str, os.PathLike]], **a_: str ): '''simple docstring''' _snake_case : Optional[int] = 0 _snake_case : List[Any] = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... _snake_case : int = pretrained_model_path while os.path.isdir(a_ ): _snake_case : List[str] = ControlNetModel.from_pretrained(a_, **a_ ) controlnets.append(a_ ) idx += 1 _snake_case : str = pretrained_model_path + f"_{idx}" logger.info(f"{len(a_ )} controlnets loaded from {pretrained_model_path}." ) if len(a_ ) == 0: raise ValueError( f"No ControlNets found under {os.path.dirname(a_ )}. Expected at least {pretrained_model_path + '_0'}." ) return cls(a_ )
"""simple docstring""" import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Dict ): """simple docstring""" assert isinstance(snake_case__ , snake_case__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict ): """simple docstring""" _snake_case : str = tmp_path / """cache""" _snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _snake_case : str = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : int , snake_case__ : List[Any] ): """simple docstring""" _snake_case : str = tmp_path / """cache""" _snake_case : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : List[Any] = features.copy() if features else default_expected_features _snake_case : List[Any] = ( Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None ) _snake_case : Optional[Any] = ParquetDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : int ): """simple docstring""" _snake_case : List[str] = tmp_path / """cache""" _snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : int = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : str , snake_case__ : str ): """simple docstring""" if issubclass(snake_case__ , snake_case__ ): _snake_case : Optional[Any] = parquet_path elif issubclass(snake_case__ , snake_case__ ): _snake_case : int = [parquet_path] _snake_case : Union[str, Any] = tmp_path / """cache""" _snake_case : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : List[str] = 
ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_dataset(snake_case__ , snake_case__ ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : str=("train",) ): """simple docstring""" assert isinstance(snake_case__ , snake_case__ ) for split in splits: _snake_case : int = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : List[Any] ): """simple docstring""" _snake_case : Tuple = tmp_path / """cache""" _snake_case : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _snake_case : Tuple = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] ): """simple docstring""" _snake_case : Optional[int] = tmp_path / """cache""" _snake_case : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : Optional[Any] = features.copy() if features else default_expected_features _snake_case : Dict = ( Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None ) _snake_case : Optional[int] = ParquetDatasetReader({"""train""": parquet_path} , features=snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Tuple ): """simple docstring""" if split: _snake_case : int = {split: parquet_path} else: _snake_case : Optional[Any] = """train""" _snake_case : int = {"""train""": parquet_path, """test""": parquet_path} _snake_case : Dict = tmp_path / """cache""" _snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _snake_case : Union[str, Any] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read() _check_parquet_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Tuple ): """simple docstring""" _snake_case : List[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _snake_case : str = pq.ParquetFile(tmp_path / """foo.parquet""" ) _snake_case : int = pf.read() assert dataset.data.table == output_table def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ): """simple docstring""" 
_snake_case : Optional[Any] = str(shared_datadir / """test_image_rgb.jpg""" ) _snake_case : Tuple = {"""image""": [image_path]} _snake_case : Optional[int] = Features({"""image""": Image()} ) _snake_case : int = Dataset.from_dict(snake_case__ , features=snake_case__ ) _snake_case : Optional[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _snake_case : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features _snake_case : Optional[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=snake_case__ ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str ): """simple docstring""" assert get_writer_batch_size(snake_case__ ) == expected
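# A minimal round-trip sketch (paths are hypothetical) of what the tests above
# exercise: write a Dataset to Parquet with ParquetDatasetWriter, read it back
# with ParquetDatasetReader, and check the schema survives.
from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4], "col_3": [1.0, 2.0, 3.0, 4.0]})
assert ParquetDatasetWriter(ds, "/tmp/foo.parquet").write() > 0  # returns bytes written
reloaded = ParquetDatasetReader("/tmp/foo.parquet", cache_dir="/tmp/parquet_cache").read()
assert reloaded.column_names == ["col_1", "col_2", "col_3"]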
"""simple docstring""" import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Union[str, Any]=True , snake_case__ : int="pt" ): """simple docstring""" _snake_case : Dict = {"""add_prefix_space""": True} if isinstance(snake_case__ , snake_case__ ) and not line.startswith(""" """ ) else {} _snake_case : Dict = padding_side return tokenizer( [line] , max_length=snake_case__ , padding="""max_length""" if pad_to_max_length else None , truncation=snake_case__ , return_tensors=snake_case__ , add_special_tokens=snake_case__ , **snake_case__ , ) def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : int=None , ): """simple docstring""" _snake_case : str = input_ids.ne(snake_case__ ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class lowercase( __a ): '''simple docstring''' def __init__( self: Union[str, Any], a_: Optional[int], a_: List[Any], a_: Optional[Any], a_: List[Any], a_: Dict="train", a_: List[Any]=None, a_: Tuple=None, a_: Any=None, a_: Tuple="", ): '''simple docstring''' super().__init__() _snake_case : Union[str, Any] = Path(a_ ).joinpath(type_path + """.source""" ) _snake_case : int = Path(a_ ).joinpath(type_path + """.target""" ) _snake_case : int = self.get_char_lens(self.src_file ) _snake_case : List[Any] = max_source_length _snake_case : Union[str, Any] = max_target_length assert min(self.src_lens ) > 0, f"found empty line in {self.src_file}" _snake_case : Optional[Any] = tokenizer _snake_case : Tuple = prefix if n_obs is not None: _snake_case : Any = self.src_lens[:n_obs] _snake_case : Union[str, Any] = src_lang _snake_case : str = tgt_lang def __len__( self: Dict ): '''simple docstring''' return len(self.src_lens ) def __getitem__( self: Union[str, Any], a_: Union[str, Any] ): '''simple docstring''' _snake_case : Any = index + 1 # linecache starts at 1 _snake_case : Tuple = self.prefix + linecache.getline(str(self.src_file ), a_ ).rstrip("""\n""" ) _snake_case : Tuple = linecache.getline(str(self.tgt_file ), a_ ).rstrip("""\n""" ) assert source_line, f"empty source line for index {index}" assert tgt_line, f"empty tgt line for index {index}" # Need to add eos token manually for T5 if isinstance(self.tokenizer, a_ ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right _snake_case : List[str] = ( self.tokenizer.question_encoder if isinstance(self.tokenizer, a_ ) else self.tokenizer ) _snake_case : Union[str, Any] = self.tokenizer.generator if isinstance(self.tokenizer, a_ ) else self.tokenizer _snake_case : List[str] = encode_line(a_, a_, self.max_source_length, """right""" ) _snake_case : Optional[Any] = encode_line(a_, a_, self.max_target_length, """right""" ) _snake_case : Dict = source_inputs["""input_ids"""].squeeze() _snake_case : int = target_inputs["""input_ids"""].squeeze() _snake_case : List[Any] = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, 
"decoder_input_ids": target_ids, } @staticmethod def UpperCamelCase_ ( a_: List[str] ): '''simple docstring''' return [len(a_ ) for x in Path(a_ ).open().readlines()] def UpperCamelCase_ ( self: str, a_: Dict ): '''simple docstring''' _snake_case : List[str] = torch.stack([x["""input_ids"""] for x in batch] ) _snake_case : List[Any] = torch.stack([x["""attention_mask"""] for x in batch] ) _snake_case : Optional[int] = torch.stack([x["""decoder_input_ids"""] for x in batch] ) _snake_case : Optional[int] = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer, a_ ) else self.tokenizer.pad_token_id ) _snake_case : int = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer, a_ ) else self.tokenizer.pad_token_id ) _snake_case : Tuple = trim_batch(a_, a_ ) _snake_case , _snake_case : List[str] = trim_batch(a_, a_, attention_mask=a_ ) _snake_case : Optional[Any] = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch A_ = getLogger(__name__) def UpperCAmelCase__ (snake_case__ : List[List] ): """simple docstring""" return list(itertools.chain.from_iterable(snake_case__ ) ) def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" _snake_case : List[str] = get_git_info() save_json(snake_case__ , os.path.join(snake_case__ , """git_log.json""" ) ) def UpperCAmelCase__ (snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple=4 , **snake_case__ : List[Any] ): """simple docstring""" with open(snake_case__ , """w""" ) as f: json.dump(snake_case__ , snake_case__ , indent=snake_case__ , **snake_case__ ) def UpperCAmelCase__ (snake_case__ : Any ): """simple docstring""" with open(snake_case__ ) as f: return json.load(snake_case__ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Dict = git.Repo(search_parent_directories=snake_case__ ) _snake_case : Any = { """repo_id""": str(snake_case__ ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), """hostname""": str(socket.gethostname() ), } return repo_infos def UpperCAmelCase__ (snake_case__ : Callable , snake_case__ : Iterable ): """simple docstring""" return list(map(snake_case__ , snake_case__ ) ) def UpperCAmelCase__ (snake_case__ : Union[str, Any] , snake_case__ : Any ): """simple docstring""" with open(snake_case__ , """wb""" ) as f: return pickle.dump(snake_case__ , snake_case__ ) def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" def remove_articles(snake_case__ : Union[str, Any] ): return re.sub(R"""\b(a|an|the)\b""" , """ """ , snake_case__ ) def white_space_fix(snake_case__ : List[Any] ): return " ".join(text.split() ) def remove_punc(snake_case__ : Any ): _snake_case : List[str] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(snake_case__ : int ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(snake_case__ ) ) ) ) def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Dict ): """simple docstring""" _snake_case : List[str] = normalize_answer(snake_case__ ).split() _snake_case : int = normalize_answer(snake_case__ ).split() _snake_case : List[Any] = Counter(snake_case__ ) & Counter(snake_case__ ) _snake_case : Union[str, Any] = sum(common.values() ) if num_same == 0: return 0 _snake_case : Dict = 1.0 * num_same / len(snake_case__ ) _snake_case : List[Any] = 1.0 * num_same / len(snake_case__ ) _snake_case : Optional[int] = (2 * precision * recall) / (precision + recall) return 
fa def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Any ): """simple docstring""" return normalize_answer(snake_case__ ) == normalize_answer(snake_case__ ) def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : List[str] ): """simple docstring""" assert len(snake_case__ ) == len(snake_case__ ) _snake_case : Optional[Any] = 0 for hypo, pred in zip(snake_case__ , snake_case__ ): em += exact_match_score(snake_case__ , snake_case__ ) if len(snake_case__ ) > 0: em /= len(snake_case__ ) return {"em": em} def UpperCAmelCase__ (snake_case__ : Tuple ): """simple docstring""" return model_prefix.startswith("""rag""" ) def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : Tuple ): """simple docstring""" _snake_case : str = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead _snake_case : List[str] = """dropout_rate""" for p in extra_params: if getattr(snake_case__ , snake_case__ , snake_case__ ): if not hasattr(snake_case__ , snake_case__ ) and not hasattr(snake_case__ , equivalent_param[p] ): logger.info("""config doesn't have a `{}` attribute""".format(snake_case__ ) ) delattr(snake_case__ , snake_case__ ) continue _snake_case : Optional[int] = p if hasattr(snake_case__ , snake_case__ ) else equivalent_param[p] setattr(snake_case__ , snake_case__ , getattr(snake_case__ , snake_case__ ) ) delattr(snake_case__ , snake_case__ ) return hparams, config
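# A self-contained sketch of the token-level F1 computed above (without the
# article/punctuation stripping that normalize_answer also applies): overlapping
# token counts give precision and recall, and F1 is their harmonic mean.
from collections import Counter


def token_f1(pred, gold):
    pred_tokens, gold_tokens = pred.lower().split(), gold.lower().split()
    num_same = sum((Counter(pred_tokens) & Counter(gold_tokens)).values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(gold_tokens)
    return 2 * precision * recall / (precision + recall)


# 2 shared tokens out of 3 on each side -> precision = recall = 2/3 -> F1 = 2/3
assert abs(token_f1("the cat sat", "cat sat down") - 2 / 3) < 1e-9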
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase: '''simple docstring''' def __init__( self: Dict, a_: Union[str, Any], a_: Tuple=13, a_: Dict=32, a_: Optional[Any]=3, a_: Optional[Any]=4, a_: Optional[int]=[10, 20, 30, 40], a_: Any=[2, 2, 3, 2], a_: Dict=True, a_: Dict=True, a_: List[str]=37, a_: Dict="gelu", a_: List[str]=10, a_: Union[str, Any]=0.02, a_: Any=["stage2", "stage3", "stage4"], a_: Optional[int]=3, a_: Tuple=None, ): '''simple docstring''' _snake_case : Dict = parent _snake_case : Dict = batch_size _snake_case : Optional[Any] = image_size _snake_case : int = num_channels _snake_case : Tuple = num_stages _snake_case : int = hidden_sizes _snake_case : List[str] = depths _snake_case : str = is_training _snake_case : Dict = use_labels _snake_case : List[str] = intermediate_size _snake_case : Optional[int] = hidden_act _snake_case : Any = type_sequence_label_size _snake_case : List[str] = initializer_range _snake_case : Union[str, Any] = out_features _snake_case : Dict = num_labels _snake_case : int = scope _snake_case : Dict = num_stages def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : Optional[int] = None if self.use_labels: _snake_case : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size ) _snake_case : Tuple = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return ConvNextConfig( num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return UperNetConfig( backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=a_, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=a_, loss_ignore_index=255, num_labels=self.num_labels, ) def UpperCamelCase_ ( self: Tuple, a_: List[Any], a_: Dict, a_: Tuple ): '''simple docstring''' _snake_case : List[Any] = UperNetForSemanticSegmentation(config=a_ ) model.to(a_ ) model.eval() _snake_case : Tuple = model(a_ ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Any = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : List[Any] = config_and_inputs _snake_case : Any = 
{"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = (UperNetForSemanticSegmentation,) if is_torch_available() else () lowercase__ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {} lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : List[str] = UperNetModelTester(self ) _snake_case : Dict = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Dict = model_class(a_ ) _snake_case : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : Tuple = [*signature.parameters.keys()] _snake_case : Any = ["""pixel_values"""] self.assertListEqual(arg_names[:1], a_ ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*a_ ) @unittest.skip(reason="""UperNet does not use inputs_embeds""" ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not support input and output embeddings""" ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not have a base model""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not have a base model""" ) def UpperCamelCase_ ( self: int ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' pass def UpperCamelCase_ ( self: str ): '''simple docstring''' def check_hidden_states_output(a_: Dict, a_: List[str], a_: Optional[int] ): _snake_case : Optional[Any] = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): _snake_case : Any = model(**self._prepare_for_class(a_, a_ ) ) _snake_case : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case : List[str] = self.model_tester.num_stages self.assertEqual(len(a_ ), expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size 
// 4], ) _snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : int = True check_hidden_states_output(a_, a_, a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case : Optional[int] = True check_hidden_states_output(a_, a_, a_ ) def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case , _snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Tuple = _config_zero_init(a_ ) _snake_case : Dict = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: _snake_case : Optional[int] = model_class(config=a_ ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="""UperNet does not have tied weights""" ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' pass @slow def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : int = UperNetForSemanticSegmentation.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Union[str, Any] = hf_hub_download( repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" ) _snake_case : List[Any] = Image.open(snake_case__ ).convert("""RGB""" ) return image @require_torch @require_vision @slow class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : str = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" ) _snake_case : Any = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(a_ ) _snake_case : Dict = prepare_img() _snake_case : str = processor(images=a_, return_tensors="""pt""" ).to(a_ ) with torch.no_grad(): _snake_case : Tuple = model(**a_ ) _snake_case : Tuple = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : int = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" ) _snake_case : Optional[int] = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(a_ ) _snake_case : List[str] = prepare_img() _snake_case : Tuple = processor(images=a_, return_tensors="""pt""" ).to(a_ ) with torch.no_grad(): _snake_case : Optional[Any] = model(**a_ ) _snake_case : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : Optional[Any] = torch.tensor( [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
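# The integration tests above verify logits of shape (batch, num_labels, 512, 512);
# turning them into a per-pixel segmentation map is a single argmax over the
# class dimension. A short sketch with random logits (150 classes, as in
# ADE20K — an assumption, not read from the checkpoints above):
import torch

logits = torch.randn(1, 150, 512, 512)
seg_map = logits.argmax(dim=1)  # (1, 512, 512) class index per pixel
assert seg_map.shape == (1, 512, 512)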
"""simple docstring""" import unittest from knapsack import greedy_knapsack as kp class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Optional[Any] = [10, 20, 30, 40, 50, 60] _snake_case : List[str] = [2, 4, 6, 8, 10, 12] _snake_case : Tuple = 100 self.assertEqual(kp.calc_profit(a_, a_, a_ ), 210 ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' self.assertRaisesRegex(a_, """max_weight must greater than zero.""" ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' self.assertRaisesRegex(a_, """Weight can not be negative.""" ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' self.assertRaisesRegex(a_, """Profit can not be negative.""" ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' self.assertRaisesRegex(a_, """max_weight must greater than zero.""" ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' self.assertRaisesRegex( a_, """The length of profit and weight must be same.""" ) if __name__ == "__main__": unittest.main()
"""simple docstring""" from __future__ import annotations import string from itertools import cycle, product from pathlib import Path A_ = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) A_ = [ord(letter) for letter in string.ascii_lowercase] A_ = {ord(char) for char in VALID_CHARS} A_ = ["the", "be", "to", "of", "and", "in", "that", "have"] def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : tuple[int, ...] ): """simple docstring""" _snake_case : str = "" _snake_case : int _snake_case : int _snake_case : int for keychar, cipherchar in zip(cycle(snake_case__ ) , snake_case__ ): _snake_case : List[str] = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(snake_case__ ) return decoded def UpperCAmelCase__ (snake_case__ : list[int] ): """simple docstring""" _snake_case : list[str] = [] for key in product(snake_case__ , repeat=3 ): _snake_case : List[Any] = try_key(snake_case__ , snake_case__ ) if encoded is not None: possibles.append(snake_case__ ) return possibles def UpperCAmelCase__ (snake_case__ : list[str] , snake_case__ : str ): """simple docstring""" return [possible for possible in possibles if common_word in possible.lower()] def UpperCAmelCase__ (snake_case__ : str = "p059_cipher.txt" ): """simple docstring""" _snake_case : list[int] _snake_case : list[str] _snake_case : str _snake_case : str _snake_case : str = Path(snake_case__ ).parent.joinpath(snake_case__ ).read_text(encoding="""utf-8""" ) _snake_case : List[Any] = [int(snake_case__ ) for number in data.strip().split(""",""" )] _snake_case : Optional[Any] = filter_valid_chars(snake_case__ ) for common_word in COMMON_WORDS: _snake_case : Union[str, Any] = filter_common_word(snake_case__ , snake_case__ ) if len(snake_case__ ) == 1: break _snake_case : Optional[int] = possibles[0] return sum(ord(snake_case__ ) for char in decoded_text ) if __name__ == "__main__": print(F'''{solution() = }''')
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class lowercase( unittest.TestCase ): '''simple docstring''' def __init__( self: int, a_: Tuple, a_: List[Any]=7, a_: Tuple=3, a_: Dict=10, a_: Dict=18, a_: Optional[int]=30, a_: Dict=400, a_: Optional[Any]=True, a_: Dict=None, a_: int=True, a_: Tuple=[0.5, 0.5, 0.5], a_: Tuple=[0.5, 0.5, 0.5], a_: Optional[int]=None, ): '''simple docstring''' _snake_case : Any = size if size is not None else {"""shortest_edge""": 18} _snake_case : List[str] = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} _snake_case : int = parent _snake_case : List[Any] = batch_size _snake_case : Union[str, Any] = num_channels _snake_case : Dict = num_frames _snake_case : str = image_size _snake_case : int = min_resolution _snake_case : Dict = max_resolution _snake_case : Any = do_resize _snake_case : Optional[Any] = size _snake_case : Optional[Any] = do_normalize _snake_case : Dict = image_mean _snake_case : str = image_std _snake_case : List[str] = crop_size def UpperCamelCase_ ( self: int ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = VivitImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : Union[str, Any] = VivitImageProcessingTester(self ) @property def UpperCamelCase_ ( self: int ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_, """image_mean""" ) ) self.assertTrue(hasattr(a_, """image_std""" ) ) self.assertTrue(hasattr(a_, """do_normalize""" ) ) self.assertTrue(hasattr(a_, """do_resize""" ) ) self.assertTrue(hasattr(a_, """do_center_crop""" ) ) self.assertTrue(hasattr(a_, """size""" ) ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {"""shortest_edge""": 18} ) self.assertEqual(image_processor.crop_size, {"""height""": 18, """width""": 18} ) _snake_case : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 ) self.assertEqual(image_processor.size, {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size, {"""height""": 84, """width""": 84} ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos _snake_case : List[str] = prepare_video_inputs(self.image_processor_tester, equal_resolution=a_ ) for video in video_inputs: self.assertIsInstance(a_, a_ ) self.assertIsInstance(video[0], Image.Image ) # Test not batched input _snake_case : Optional[Any] = image_processing(video_inputs[0], return_tensors="""pt""" 
).pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ), ) # Test batched _snake_case : Dict = image_processing(a_, return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ), ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _snake_case : Union[str, Any] = prepare_video_inputs(self.image_processor_tester, equal_resolution=a_, numpify=a_ ) for video in video_inputs: self.assertIsInstance(a_, a_ ) self.assertIsInstance(video[0], np.ndarray ) # Test not batched input _snake_case : Dict = image_processing(video_inputs[0], return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ), ) # Test batched _snake_case : List[Any] = image_processing(a_, return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ), ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _snake_case : str = prepare_video_inputs(self.image_processor_tester, equal_resolution=a_, torchify=a_ ) for video in video_inputs: self.assertIsInstance(a_, a_ ) self.assertIsInstance(video[0], torch.Tensor ) # Test not batched input _snake_case : Any = image_processing(video_inputs[0], return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ), ) # Test batched _snake_case : Union[str, Any] = image_processing(a_, return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ), )
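# The assertions above all check the same video layout: pixel_values is a
# (batch, num_frames, num_channels, height, width) tensor, with batch == 1 on
# the unbatched path. A synthetic shape check using the tester's defaults
# (batch_size=7, num_frames=10, crop_size=18):
import torch

pixel_values = torch.rand(7, 10, 3, 18, 18)
assert pixel_values.shape == (7, 10, 3, 18, 18)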
"""simple docstring""" from ...processing_utils import ProcessorMixin class lowercase( __a ): '''simple docstring''' lowercase__ = ["image_processor", "feature_extractor"] lowercase__ = "TvltImageProcessor" lowercase__ = "TvltFeatureExtractor" def __init__( self: Dict, a_: Union[str, Any], a_: Union[str, Any] ): '''simple docstring''' super().__init__(image_processor=a_, feature_extractor=a_ ) _snake_case : Any = image_processor _snake_case : Dict = feature_extractor def __call__( self: int, a_: str=None, a_: Tuple=None, a_: Dict=None, a_: str=None, a_: Optional[int]=False, a_: Tuple=False, *a_: List[str], **a_: int, ): '''simple docstring''' if images is None and audio is None: raise ValueError("""You need to specify either an `images` or `audio` input to process.""" ) _snake_case : Optional[int] = None if images is not None: _snake_case : Tuple = self.image_processor(a_, mask_pixel=a_, *a_, **a_ ) if images_mixed is not None: _snake_case : Optional[int] = self.image_processor(a_, is_mixed=a_, *a_, **a_ ) if audio is not None: _snake_case : Any = self.feature_extractor( a_, *a_, sampling_rate=a_, mask_audio=a_, **a_ ) _snake_case : List[str] = {} if audio is not None: output_dict.update(a_ ) if images is not None: output_dict.update(a_ ) if images_mixed_dict is not None: output_dict.update(a_ ) return output_dict @property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Dict = self.image_processor.model_input_names _snake_case : List[str] = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule A_ = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']} if TYPE_CHECKING: from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): A_ = '''pt''' elif is_tf_available(): A_ = '''tf''' else: A_ = '''jax''' class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ByTaTokenizer lowercase__ = False def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' super().setUp() _snake_case : List[str] = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' return ByTaTokenizer.from_pretrained("""google/byt5-small""" ) def UpperCamelCase_ ( self: List[Any], **a_: int ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname, **a_ ) def UpperCamelCase_ ( self: Optional[Any], a_: Optional[Any], a_: List[Any]=False, a_: int=20, a_: Union[str, Any]=5 ): '''simple docstring''' _snake_case : List[Any] = [] for i in range(len(a_ ) ): try: _snake_case : Optional[Any] = tokenizer.decode([i], clean_up_tokenization_spaces=a_ ) except UnicodeDecodeError: pass toks.append((i, tok) ) _snake_case : str = list(filter(lambda a_ : re.match(r"""^[ a-zA-Z]+$""", t[1] ), a_ ) ) _snake_case : List[Any] = list(filter(lambda a_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=a_ ), a_ ) ) if max_length is not None and len(a_ ) > max_length: _snake_case : Tuple = toks[:max_length] if min_length is not None and len(a_ ) < min_length and len(a_ ) > 0: while len(a_ ) < min_length: _snake_case : List[str] = toks + toks # toks_str = [t[1] for t in toks] _snake_case : Tuple = [t[0] for t in toks] # Ensure consistency _snake_case : Optional[Any] = tokenizer.decode(a_, clean_up_tokenization_spaces=a_ ) if " " not in output_txt and len(a_ ) > 1: _snake_case : Dict = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=a_ ) + """ """ + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=a_ ) ) if with_prefix_space: _snake_case : Union[str, Any] = """ """ + output_txt _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ ) return output_txt, output_ids def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[int] = self.ta_base_tokenizer _snake_case : Optional[Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] ) _snake_case : int = tokenizer(["""hi""", """I went to the gym""", """"""] ) self.assertListEqual(batch_with_eos_added["""input_ids"""], batch_without_eos_added["""input_ids"""] ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[str] = self.ta_base_tokenizer _snake_case : Tuple = """Unicode €.""" _snake_case : List[Any] = tokenizer(a_ ) _snake_case : Tuple = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded["""input_ids"""], a_ ) # decoding _snake_case : Tuple = tokenizer.decode(a_ ) self.assertEqual(a_, """Unicode €.</s>""" ) _snake_case : Tuple = tokenizer("""e è é ê ë""" ) _snake_case : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded["""input_ids"""], a_ ) # decoding _snake_case : int = tokenizer.decode(a_ ) self.assertEqual(a_, """e è é ê ë</s>""" ) # encode/decode, but with `encode` instead of `__call__` 
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ), """e è é ê ë</s>""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Dict = self.ta_base_tokenizer _snake_case : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] # fmt: off _snake_case : Union[str, Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on _snake_case : int = tokenizer(a_, padding=a_, return_tensors=a_ ) self.assertIsInstance(a_, a_ ) if FRAMEWORK != "jax": _snake_case : List[str] = list(batch.input_ids.numpy()[0] ) else: _snake_case : Optional[int] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(a_, a_ ) self.assertEqual((2, 37), batch.input_ids.shape ) self.assertEqual((2, 37), batch.attention_mask.shape ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : List[Any] = self.ta_base_tokenizer _snake_case : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] _snake_case : Tuple = tokenizer(a_, padding=a_, return_tensors=a_ ) # check if input_ids are returned and no decoder_input_ids self.assertIn("""input_ids""", a_ ) self.assertIn("""attention_mask""", a_ ) self.assertNotIn("""decoder_input_ids""", a_ ) self.assertNotIn("""decoder_attention_mask""", a_ ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Union[str, Any] = self.ta_base_tokenizer _snake_case : Dict = [ """Summary of the text.""", """Another summary.""", ] _snake_case : Optional[int] = tokenizer( text_target=a_, max_length=32, padding="""max_length""", truncation=a_, return_tensors=a_ ) self.assertEqual(32, targets["""input_ids"""].shape[1] ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : int = self.ta_base_tokenizer _snake_case : Optional[int] = ["""A long paragraph for summarization. </s>"""] _snake_case : Dict = ["""Summary of the text. 
</s>"""] # fmt: off _snake_case : Optional[int] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] _snake_case : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on _snake_case : Optional[Any] = tokenizer(a_, text_target=a_ ) self.assertEqual(a_, batch["""input_ids"""][0] ) self.assertEqual(a_, batch["""labels"""][0] ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): self.assertNotEqual(tokenizer.model_max_length, 42 ) # Now let's start the test _snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc _snake_case : List[str] = tempfile.mkdtemp() _snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running""" _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ ) tokenizer.save_pretrained(a_ ) _snake_case : List[Any] = tokenizer.__class__.from_pretrained(a_ ) _snake_case : Dict = after_tokenizer.encode(a_, add_special_tokens=a_ ) self.assertListEqual(a_, a_ ) shutil.rmtree(a_ ) _snake_case : Tuple = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc _snake_case : Union[str, Any] = tempfile.mkdtemp() _snake_case : List[Any] = """ He is very happy, UNwant\u00E9d,running""" tokenizer.add_tokens(["""bim""", """bambam"""] ) _snake_case : Optional[Any] = tokenizer.additional_special_tokens additional_special_tokens.append("""new_additional_special_token""" ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) _snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ ) tokenizer.save_pretrained(a_ ) _snake_case : Optional[Any] = tokenizer.__class__.from_pretrained(a_ ) _snake_case : str = after_tokenizer.encode(a_, add_special_tokens=a_ ) self.assertListEqual(a_, a_ ) self.assertIn("""new_additional_special_token""", after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length, 42 ) _snake_case : Optional[int] = tokenizer.__class__.from_pretrained(a_, model_max_length=43 ) self.assertEqual(tokenizer.model_max_length, 43 ) shutil.rmtree(a_ ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Optional[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(a_ ) with open(os.path.join(a_, """special_tokens_map.json""" ), encoding="""utf-8""" ) as json_file: _snake_case : Union[str, Any] = json.load(a_ ) with open(os.path.join(a_, """tokenizer_config.json""" ), encoding="""utf-8""" ) as json_file: _snake_case : List[Any] = json.load(a_ ) _snake_case : int = [f"<extra_id_{i}>" for i in range(125 )] _snake_case : Optional[int] = added_tokens_extra_ids + [ """an_additional_special_token""" ] _snake_case : Dict = added_tokens_extra_ids + [ 
"""an_additional_special_token""" ] with open(os.path.join(a_, """special_tokens_map.json""" ), """w""", encoding="""utf-8""" ) as outfile: json.dump(a_, a_ ) with open(os.path.join(a_, """tokenizer_config.json""" ), """w""", encoding="""utf-8""" ) as outfile: json.dump(a_, a_ ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _snake_case : Optional[int] = tokenizer_class.from_pretrained( a_, ) self.assertIn( """an_additional_special_token""", tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ["""an_additional_special_token"""], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _snake_case : Union[str, Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""", lstrip=a_ )] _snake_case : List[Any] = tokenizer_class.from_pretrained( a_, additional_special_tokens=a_, ) self.assertIn("""a_new_additional_special_token""", tokenizer.additional_special_tokens ) self.assertEqual( ["""a_new_additional_special_token"""], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ), ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : List[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(a_ ) _snake_case : Optional[Any] = tokenizer_class.from_pretrained(a_ ) self.assertTrue(tokenizer.decode([255] ) == """""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' pass def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[Any] = self.get_tokenizers(fast=a_, do_lower_case=a_ ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): _snake_case : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""] _snake_case : List[Any] = tokenizer.convert_tokens_to_string(a_ ) self.assertIsInstance(a_, a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): _snake_case : Optional[int] = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] _snake_case : Any = 0 _snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens( a_, skip_special_tokens=a_ ) for attr in attributes_list: setattr(a_, attr + """_id""", a_ ) self.assertEqual(getattr(a_, a_ ), a_ ) self.assertEqual(getattr(a_, attr + """_id""" ), a_ ) setattr(a_, attr + """_id""", a_ ) 
self.assertEqual(getattr(a_, a_ ), a_ ) self.assertEqual(getattr(a_, attr + """_id""" ), a_ ) setattr(a_, """additional_special_tokens_ids""", [] ) self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [] ) self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [] ) setattr(a_, """additional_special_tokens_ids""", [token_id_to_test_setters] ) self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [token_to_test_setters] ) self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [token_id_to_test_setters] )
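# The tests above exercise byte-level (ByT5-style) tokenization: each id is the
# utf-8 byte value shifted by the number of special tokens. A minimal sketch of
# that mapping (the offset of 3 — pad/eos/unk — matches the id lists above,
# e.g. "A" is 0x41 = 65, which appears as 68, and eos is 1). The helper name
# `byte_ids` is my own, added for illustration only.
def byte_ids(text: str, offset: int = 3) -> list:
    # shift every utf-8 byte by the special-token offset
    return [b + offset for b in text.encode("utf-8")]


if __name__ == "__main__":
    print(byte_ids("A long paragraph"))  # starts with 68, 35, 111, 114, 113, 106, ...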
"""simple docstring""" import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput A_ = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class lowercase( __a ): '''simple docstring''' def __init__( self: Optional[Any], *a_: str, a_: int=None, a_: Optional[Any]=None, a_: List[str]=None, **a_: str ): '''simple docstring''' super().__init__(*a_, **a_ ) _snake_case : int = eval_examples _snake_case : Optional[int] = post_process_function _snake_case : Dict = quant_trainer_args _snake_case : Optional[int] = 128 # default number of calibration samples def UpperCamelCase_ ( self: Tuple, a_: Any=None ): '''simple docstring''' if calib_dataset is None and self.calib_dataset is None: raise ValueError("""Trainer: calibration requires an calib_dataset.""" ) _snake_case : List[str] = calib_dataset if calib_dataset is not None else self.calib_dataset _snake_case : List[Any] = self._remove_unused_columns(a_, description="""Calibration""" ) return DataLoader( a_, batch_size=self.args.eval_batch_size, collate_fn=self.data_collator, drop_last=self.args.dataloader_drop_last, num_workers=self.args.dataloader_num_workers, pin_memory=self.args.dataloader_pin_memory, shuffle=a_, ) def UpperCamelCase_ ( self: List[str], a_: Any=None ): '''simple docstring''' _snake_case : List[Any] = self.train_dataset if calib_dataset is None else calib_dataset _snake_case : Optional[int] = self.get_calib_dataloader(a_ ) _snake_case : int = self.model quant_trainer.configure_model(a_, self.quant_trainer_args, calib=a_ ) model.eval() quant_trainer.enable_calibration(a_ ) logger.info("""***** Running calibration *****""" ) logger.info(f" Num examples = {self.calib_num}" ) logger.info(f" Batch size = {calib_dataloader.batch_size}" ) for step, inputs in enumerate(a_ ): # Prediction step _snake_case , _snake_case , _snake_case : str = self.prediction_step(a_, a_, prediction_loss_only=a_ ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(a_, self.quant_trainer_args ) _snake_case : str = model def UpperCamelCase_ ( self: List[Any], a_: Optional[int]=None, a_: str=None, a_: Optional[int]=None, a_: str = "eval" ): '''simple docstring''' _snake_case : Optional[int] = self.eval_dataset if eval_dataset is None else eval_dataset _snake_case : int = self.get_eval_dataloader(a_ ) _snake_case : Union[str, Any] = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. 
_snake_case : int = self.compute_metrics _snake_case : str = None _snake_case : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _snake_case : str = eval_loop( a_, description="""Evaluation""", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=a_, ) finally: _snake_case : Any = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: _snake_case : Any = self.post_process_function(a_, a_, output.predictions ) _snake_case : Union[str, Any] = self.compute_metrics(a_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"{metric_key_prefix}_" ): _snake_case : Dict = metrics.pop(a_ ) self.log(a_ ) else: _snake_case : Union[str, Any] = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) _snake_case : List[Any] = self.callback_handler.on_evaluate(self.args, self.state, self.control, a_ ) return metrics def UpperCamelCase_ ( self: Optional[Any], a_: int, a_: int, a_: Optional[Any]=None, a_: str = "test" ): '''simple docstring''' _snake_case : Dict = self.get_test_dataloader(a_ ) # Temporarily disable metric computation, we will do it in the loop here. _snake_case : Dict = self.compute_metrics _snake_case : Optional[int] = None _snake_case : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _snake_case : List[str] = eval_loop( a_, description="""Prediction""", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=a_, ) finally: _snake_case : List[str] = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output _snake_case : List[Any] = self.post_process_function(a_, a_, output.predictions, """predict""" ) _snake_case : int = self.compute_metrics(a_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"{metric_key_prefix}_" ): _snake_case : List[str] = metrics.pop(a_ ) return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=a_ ) def UpperCamelCase_ ( self: Optional[int], a_: int="./" ): '''simple docstring''' _snake_case : Optional[int] = self.eval_dataset _snake_case : Union[str, Any] = self.get_eval_dataloader(a_ ) _snake_case : str = next(iter(a_ ) ) # saving device - to make it consistent _snake_case : List[Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) # convert to tuple _snake_case : List[Any] = tuple(v.to(a_ ) for k, v in batch.items() ) logger.info("""Converting model to be onnx compatible""" ) from pytorch_quantization.nn import TensorQuantizer _snake_case : Dict = True _snake_case : str = self.model.to(a_ ) model.eval() model.float() _snake_case : List[Any] = model.module if hasattr(a_, """module""" ) else model quant_trainer.configure_model(a_, self.quant_trainer_args ) _snake_case : Dict = os.path.join(a_, """model.onnx""" ) logger.info(f"exporting model to {output_model_file}" ) _snake_case : List[Any] = {0: """batch_size""", 1: """seq_len"""} torch.onnx.export( a_, a_, a_, export_params=a_, opset_version=13, do_constant_folding=a_, input_names=["""input_ids""", """attention_mask""", """token_type_ids"""], output_names=["""output_start_logits""", """output_end_logits"""], dynamic_axes={ """input_ids""": axes, """attention_mask""": axes, 
"""token_type_ids""": axes, """output_start_logits""": axes, """output_end_logits""": axes, }, verbose=a_, ) logger.info("""onnx export finished""" )
"""simple docstring""" from abc import ABC, abstractmethod from argparse import ArgumentParser class lowercase( __a ): '''simple docstring''' @staticmethod @abstractmethod def UpperCamelCase_ ( a_: ArgumentParser ): '''simple docstring''' raise NotImplementedError() @abstractmethod def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' raise NotImplementedError()
"""simple docstring""" from collections.abc import Callable def UpperCAmelCase__ (snake_case__ : Callable[[float], float] , snake_case__ : float , snake_case__ : float ): """simple docstring""" _snake_case : float = a _snake_case : float = b if function(snake_case__ ) == 0: # one of the a or b is a root for the function return a elif function(snake_case__ ) == 0: return b elif ( function(snake_case__ ) * function(snake_case__ ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError("""could not find root in given interval.""" ) else: _snake_case : float = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(snake_case__ ) == 0: return mid elif function(snake_case__ ) * function(snake_case__ ) < 0: _snake_case : Tuple = mid else: _snake_case : List[str] = mid _snake_case : Any = start + (end - start) / 2.0 return mid def UpperCAmelCase__ (snake_case__ : float ): """simple docstring""" return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 10_00)) import doctest doctest.testmod()
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''', '''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''', '''junnyu/roformer_chinese_char_small''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json''' ), '''junnyu/roformer_chinese_char_base''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json''' ), '''junnyu/roformer_small_discriminator''': ( '''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json''' ), '''junnyu/roformer_small_generator''': ( '''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json''' ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class lowercase( __a ): '''simple docstring''' lowercase__ = "roformer" def __init__( self: List[str], a_: Tuple=50_000, a_: Optional[Any]=None, a_: List[str]=768, a_: Union[str, Any]=12, a_: Optional[int]=12, a_: Optional[Any]=3_072, a_: List[str]="gelu", a_: List[str]=0.1, a_: Tuple=0.1, a_: Optional[int]=1_536, a_: Any=2, a_: Optional[int]=0.02, a_: Tuple=1E-12, a_: Dict=0, a_: str=False, a_: Dict=True, **a_: Dict, ): '''simple docstring''' super().__init__(pad_token_id=a_, **a_ ) _snake_case : int = vocab_size _snake_case : int = hidden_size if embedding_size is None else embedding_size _snake_case : Dict = hidden_size _snake_case : Optional[int] = num_hidden_layers _snake_case : Any = num_attention_heads _snake_case : Dict = hidden_act _snake_case : Optional[int] = intermediate_size _snake_case : List[Any] = hidden_dropout_prob _snake_case : Union[str, Any] = attention_probs_dropout_prob _snake_case : Any = max_position_embeddings _snake_case : Tuple = type_vocab_size _snake_case : List[Any] = initializer_range _snake_case : List[Any] = layer_norm_eps _snake_case : Optional[Any] = rotary_value _snake_case : List[str] = use_cache class lowercase( __a ): '''simple docstring''' @property def UpperCamelCase_ ( self: Dict ): '''simple docstring''' if self.task == "multiple-choice": _snake_case : str = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _snake_case : List[str] = {0: """batch""", 1: """sequence"""} _snake_case : List[Any] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool A_ = { '''Acehnese Arabic''': '''ace_Arab''', '''Acehnese Latin''': '''ace_Latn''', '''Mesopotamian Arabic''': '''acm_Arab''', '''Ta\'izzi-Adeni Arabic''': '''acq_Arab''', '''Tunisian Arabic''': '''aeb_Arab''', '''Afrikaans''': '''afr_Latn''', '''South Levantine Arabic''': '''ajp_Arab''', '''Akan''': '''aka_Latn''', '''Amharic''': '''amh_Ethi''', '''North Levantine Arabic''': '''apc_Arab''', '''Modern Standard Arabic''': '''arb_Arab''', '''Modern Standard Arabic Romanized''': '''arb_Latn''', '''Najdi Arabic''': '''ars_Arab''', '''Moroccan Arabic''': '''ary_Arab''', '''Egyptian Arabic''': '''arz_Arab''', '''Assamese''': '''asm_Beng''', '''Asturian''': '''ast_Latn''', '''Awadhi''': '''awa_Deva''', '''Central Aymara''': '''ayr_Latn''', '''South Azerbaijani''': '''azb_Arab''', '''North Azerbaijani''': '''azj_Latn''', '''Bashkir''': '''bak_Cyrl''', '''Bambara''': '''bam_Latn''', '''Balinese''': '''ban_Latn''', '''Belarusian''': '''bel_Cyrl''', '''Bemba''': '''bem_Latn''', '''Bengali''': '''ben_Beng''', '''Bhojpuri''': '''bho_Deva''', '''Banjar Arabic''': '''bjn_Arab''', '''Banjar Latin''': '''bjn_Latn''', '''Standard Tibetan''': '''bod_Tibt''', '''Bosnian''': '''bos_Latn''', '''Buginese''': '''bug_Latn''', '''Bulgarian''': '''bul_Cyrl''', '''Catalan''': '''cat_Latn''', '''Cebuano''': '''ceb_Latn''', '''Czech''': '''ces_Latn''', '''Chokwe''': '''cjk_Latn''', '''Central Kurdish''': '''ckb_Arab''', '''Crimean Tatar''': '''crh_Latn''', '''Welsh''': '''cym_Latn''', '''Danish''': '''dan_Latn''', '''German''': '''deu_Latn''', '''Southwestern Dinka''': '''dik_Latn''', '''Dyula''': '''dyu_Latn''', '''Dzongkha''': '''dzo_Tibt''', '''Greek''': '''ell_Grek''', '''English''': '''eng_Latn''', '''Esperanto''': '''epo_Latn''', '''Estonian''': '''est_Latn''', '''Basque''': '''eus_Latn''', '''Ewe''': '''ewe_Latn''', '''Faroese''': '''fao_Latn''', '''Fijian''': '''fij_Latn''', '''Finnish''': '''fin_Latn''', '''Fon''': '''fon_Latn''', '''French''': '''fra_Latn''', '''Friulian''': '''fur_Latn''', '''Nigerian Fulfulde''': '''fuv_Latn''', '''Scottish Gaelic''': '''gla_Latn''', '''Irish''': '''gle_Latn''', '''Galician''': '''glg_Latn''', '''Guarani''': '''grn_Latn''', '''Gujarati''': '''guj_Gujr''', '''Haitian Creole''': '''hat_Latn''', '''Hausa''': '''hau_Latn''', '''Hebrew''': '''heb_Hebr''', '''Hindi''': '''hin_Deva''', '''Chhattisgarhi''': '''hne_Deva''', '''Croatian''': '''hrv_Latn''', '''Hungarian''': '''hun_Latn''', '''Armenian''': '''hye_Armn''', '''Igbo''': '''ibo_Latn''', '''Ilocano''': '''ilo_Latn''', '''Indonesian''': '''ind_Latn''', '''Icelandic''': '''isl_Latn''', '''Italian''': '''ita_Latn''', '''Javanese''': '''jav_Latn''', '''Japanese''': '''jpn_Jpan''', '''Kabyle''': '''kab_Latn''', '''Jingpho''': '''kac_Latn''', '''Kamba''': '''kam_Latn''', '''Kannada''': '''kan_Knda''', '''Kashmiri 
Arabic''': '''kas_Arab''', '''Kashmiri Devanagari''': '''kas_Deva''', '''Georgian''': '''kat_Geor''', '''Central Kanuri Arabic''': '''knc_Arab''', '''Central Kanuri Latin''': '''knc_Latn''', '''Kazakh''': '''kaz_Cyrl''', '''Kabiyè''': '''kbp_Latn''', '''Kabuverdianu''': '''kea_Latn''', '''Khmer''': '''khm_Khmr''', '''Kikuyu''': '''kik_Latn''', '''Kinyarwanda''': '''kin_Latn''', '''Kyrgyz''': '''kir_Cyrl''', '''Kimbundu''': '''kmb_Latn''', '''Northern Kurdish''': '''kmr_Latn''', '''Kikongo''': '''kon_Latn''', '''Korean''': '''kor_Hang''', '''Lao''': '''lao_Laoo''', '''Ligurian''': '''lij_Latn''', '''Limburgish''': '''lim_Latn''', '''Lingala''': '''lin_Latn''', '''Lithuanian''': '''lit_Latn''', '''Lombard''': '''lmo_Latn''', '''Latgalian''': '''ltg_Latn''', '''Luxembourgish''': '''ltz_Latn''', '''Luba-Kasai''': '''lua_Latn''', '''Ganda''': '''lug_Latn''', '''Luo''': '''luo_Latn''', '''Mizo''': '''lus_Latn''', '''Standard Latvian''': '''lvs_Latn''', '''Magahi''': '''mag_Deva''', '''Maithili''': '''mai_Deva''', '''Malayalam''': '''mal_Mlym''', '''Marathi''': '''mar_Deva''', '''Minangkabau Arabic ''': '''min_Arab''', '''Minangkabau Latin''': '''min_Latn''', '''Macedonian''': '''mkd_Cyrl''', '''Plateau Malagasy''': '''plt_Latn''', '''Maltese''': '''mlt_Latn''', '''Meitei Bengali''': '''mni_Beng''', '''Halh Mongolian''': '''khk_Cyrl''', '''Mossi''': '''mos_Latn''', '''Maori''': '''mri_Latn''', '''Burmese''': '''mya_Mymr''', '''Dutch''': '''nld_Latn''', '''Norwegian Nynorsk''': '''nno_Latn''', '''Norwegian Bokmål''': '''nob_Latn''', '''Nepali''': '''npi_Deva''', '''Northern Sotho''': '''nso_Latn''', '''Nuer''': '''nus_Latn''', '''Nyanja''': '''nya_Latn''', '''Occitan''': '''oci_Latn''', '''West Central Oromo''': '''gaz_Latn''', '''Odia''': '''ory_Orya''', '''Pangasinan''': '''pag_Latn''', '''Eastern Panjabi''': '''pan_Guru''', '''Papiamento''': '''pap_Latn''', '''Western Persian''': '''pes_Arab''', '''Polish''': '''pol_Latn''', '''Portuguese''': '''por_Latn''', '''Dari''': '''prs_Arab''', '''Southern Pashto''': '''pbt_Arab''', '''Ayacucho Quechua''': '''quy_Latn''', '''Romanian''': '''ron_Latn''', '''Rundi''': '''run_Latn''', '''Russian''': '''rus_Cyrl''', '''Sango''': '''sag_Latn''', '''Sanskrit''': '''san_Deva''', '''Santali''': '''sat_Olck''', '''Sicilian''': '''scn_Latn''', '''Shan''': '''shn_Mymr''', '''Sinhala''': '''sin_Sinh''', '''Slovak''': '''slk_Latn''', '''Slovenian''': '''slv_Latn''', '''Samoan''': '''smo_Latn''', '''Shona''': '''sna_Latn''', '''Sindhi''': '''snd_Arab''', '''Somali''': '''som_Latn''', '''Southern Sotho''': '''sot_Latn''', '''Spanish''': '''spa_Latn''', '''Tosk Albanian''': '''als_Latn''', '''Sardinian''': '''srd_Latn''', '''Serbian''': '''srp_Cyrl''', '''Swati''': '''ssw_Latn''', '''Sundanese''': '''sun_Latn''', '''Swedish''': '''swe_Latn''', '''Swahili''': '''swh_Latn''', '''Silesian''': '''szl_Latn''', '''Tamil''': '''tam_Taml''', '''Tatar''': '''tat_Cyrl''', '''Telugu''': '''tel_Telu''', '''Tajik''': '''tgk_Cyrl''', '''Tagalog''': '''tgl_Latn''', '''Thai''': '''tha_Thai''', '''Tigrinya''': '''tir_Ethi''', '''Tamasheq Latin''': '''taq_Latn''', '''Tamasheq Tifinagh''': '''taq_Tfng''', '''Tok Pisin''': '''tpi_Latn''', '''Tswana''': '''tsn_Latn''', '''Tsonga''': '''tso_Latn''', '''Turkmen''': '''tuk_Latn''', '''Tumbuka''': '''tum_Latn''', '''Turkish''': '''tur_Latn''', '''Twi''': '''twi_Latn''', '''Central Atlas Tamazight''': '''tzm_Tfng''', '''Uyghur''': '''uig_Arab''', '''Ukrainian''': '''ukr_Cyrl''', '''Umbundu''': '''umb_Latn''', '''Urdu''': '''urd_Arab''', 
'''Northern Uzbek''': '''uzn_Latn''', '''Venetian''': '''vec_Latn''', '''Vietnamese''': '''vie_Latn''', '''Waray''': '''war_Latn''', '''Wolof''': '''wol_Latn''', '''Xhosa''': '''xho_Latn''', '''Eastern Yiddish''': '''ydd_Hebr''', '''Yoruba''': '''yor_Latn''', '''Yue Chinese''': '''yue_Hant''', '''Chinese Simplified''': '''zho_Hans''', '''Chinese Traditional''': '''zho_Hant''', '''Standard Malay''': '''zsm_Latn''', '''Zulu''': '''zul_Latn''', } class lowercase( __a ): '''simple docstring''' lowercase__ = "facebook/nllb-200-distilled-600M" lowercase__ = ( "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should " "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, " "which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in " "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`." ) lowercase__ = "translator" lowercase__ = AutoTokenizer lowercase__ = AutoModelForSeqaSeqLM lowercase__ = LANGUAGE_CODES lowercase__ = ["text", "text", "text"] lowercase__ = ["text"] def UpperCamelCase_ ( self: str, a_: int, a_: Any, a_: Dict ): '''simple docstring''' if src_lang not in self.lang_to_code: raise ValueError(f"{src_lang} is not a supported language." ) if tgt_lang not in self.lang_to_code: raise ValueError(f"{tgt_lang} is not a supported language." ) _snake_case : List[Any] = self.lang_to_code[src_lang] _snake_case : int = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( a_, return_tensors="""pt""", src_lang=a_, tgt_lang=a_ ) def UpperCamelCase_ ( self: List[str], a_: Optional[Any] ): '''simple docstring''' return self.model.generate(**a_ ) def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any] ): '''simple docstring''' return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=a_ )
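# The tool above wraps facebook/nllb-200-distilled-600M. A rough sketch of the
# same translation done directly with transformers (checkpoint download
# required; the sample sentence and generation settings are illustrative):
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")

inputs = tokenizer("Hello, how are you?", return_tensors="pt")
# force the decoder to start in the target language (French here)
out = model.generate(**inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"))
print(tokenizer.decode(out[0], skip_special_tokens=True))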
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Union[str, Any]=1 ): """simple docstring""" if n_shave_prefix_segments >= 0: return ".".join(path.split(""".""" )[n_shave_prefix_segments:] ) else: return ".".join(path.split(""".""" )[:n_shave_prefix_segments] ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[Any]=0 ): """simple docstring""" _snake_case : Optional[Any] = [] for old_item in old_list: _snake_case : Union[str, Any] = old_item.replace("""in_layers.0""" , """norm1""" ) _snake_case : List[Any] = new_item.replace("""in_layers.2""" , """conv1""" ) _snake_case : Tuple = new_item.replace("""out_layers.0""" , """norm2""" ) _snake_case : Dict = new_item.replace("""out_layers.3""" , """conv2""" ) _snake_case : int = new_item.replace("""emb_layers.1""" , """time_emb_proj""" ) _snake_case : Optional[Any] = new_item.replace("""skip_connection""" , """conv_shortcut""" ) _snake_case : str = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Dict=0 ): """simple docstring""" _snake_case : Dict = [] for old_item in old_list: _snake_case : Dict = old_item _snake_case : int = new_item.replace("""norm.weight""" , """group_norm.weight""" ) _snake_case : str = new_item.replace("""norm.bias""" , """group_norm.bias""" ) _snake_case : Optional[Any] = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" ) _snake_case : Optional[Any] = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" ) _snake_case : Optional[Any] = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : str=None , snake_case__ : str=None , snake_case__ : List[str]=None ): """simple docstring""" assert isinstance(snake_case__ , snake_case__ ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _snake_case : Union[str, Any] = old_checkpoint[path] _snake_case : Optional[int] = old_tensor.shape[0] // 3 _snake_case : List[Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) _snake_case : Union[str, Any] = old_tensor.shape[0] // config["""num_head_channels"""] // 3 _snake_case : Any = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _snake_case , _snake_case , _snake_case : List[str] = old_tensor.split(channels // num_heads , dim=1 ) _snake_case : Union[str, Any] = query.reshape(snake_case__ ) _snake_case : Tuple = key.reshape(snake_case__ ) _snake_case : Any = value.reshape(snake_case__ ) for path in paths: _snake_case : List[Any] = path["""new"""] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _snake_case : Union[str, Any] = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" ) _snake_case : str = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" ) _snake_case : Any = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" ) if additional_replacements is not None: for replacement in additional_replacements: _snake_case : int = new_path.replace(replacement["""old"""] , replacement["""new"""] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _snake_case : Dict = old_checkpoint[path["""old"""]][:, :, 0] else: _snake_case : Optional[Any] = old_checkpoint[path["""old"""]] def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : List[str] ): """simple docstring""" _snake_case : int = {} _snake_case : Tuple = checkpoint["""time_embed.0.weight"""] _snake_case : List[str] = checkpoint["""time_embed.0.bias"""] _snake_case : List[str] = checkpoint["""time_embed.2.weight"""] _snake_case : Tuple = checkpoint["""time_embed.2.bias"""] _snake_case : Dict = checkpoint["""input_blocks.0.0.weight"""] _snake_case : List[Any] = checkpoint["""input_blocks.0.0.bias"""] _snake_case : List[Any] = checkpoint["""out.0.weight"""] _snake_case : Any = checkpoint["""out.0.bias"""] _snake_case : Any = checkpoint["""out.2.weight"""] _snake_case : List[str] = checkpoint["""out.2.bias"""] # Retrieves the keys for the input blocks only _snake_case : List[str] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} ) _snake_case : Any = { layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key] for layer_id in range(snake_case__ ) } # Retrieves the keys for the middle blocks only _snake_case : Optional[int] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} ) _snake_case : Optional[int] = { layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key] for layer_id in range(snake_case__ ) } # Retrieves the keys for the output blocks only _snake_case : str = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} ) _snake_case : List[Any] = { layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key] for layer_id in range(snake_case__ ) } for i in range(1 , snake_case__ ): _snake_case : Union[str, Any] = (i - 1) // (config["""num_res_blocks"""] + 1) _snake_case : int = (i - 1) % (config["""num_res_blocks"""] + 1) _snake_case : List[str] = [key for key in input_blocks[i] if 
F"input_blocks.{i}.0" in key] _snake_case : str = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key] if F"input_blocks.{i}.0.op.weight" in checkpoint: _snake_case : Union[str, Any] = checkpoint[ F"input_blocks.{i}.0.op.weight" ] _snake_case : Dict = checkpoint[ F"input_blocks.{i}.0.op.bias" ] continue _snake_case : Optional[int] = renew_resnet_paths(snake_case__ ) _snake_case : int = {"""old""": F"input_blocks.{i}.0", """new""": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"} _snake_case : Tuple = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""} assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path, resnet_op] , config=snake_case__ ) if len(snake_case__ ): _snake_case : str = renew_attention_paths(snake_case__ ) _snake_case : List[str] = { """old""": F"input_blocks.{i}.1", """new""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}", } _snake_case : Optional[int] = { F"input_blocks.{i}.1.qkv.bias": { """key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", """query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", """value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, F"input_blocks.{i}.1.qkv.weight": { """key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", """query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", """value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case__ , config=snake_case__ , ) _snake_case : int = middle_blocks[0] _snake_case : List[str] = middle_blocks[1] _snake_case : Any = middle_blocks[2] _snake_case : Dict = renew_resnet_paths(snake_case__ ) assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ ) _snake_case : Any = renew_resnet_paths(snake_case__ ) assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ ) _snake_case : Dict = renew_attention_paths(snake_case__ ) _snake_case : Tuple = { """middle_block.1.qkv.bias""": { """key""": """mid_block.attentions.0.key.bias""", """query""": """mid_block.attentions.0.query.bias""", """value""": """mid_block.attentions.0.value.bias""", }, """middle_block.1.qkv.weight""": { """key""": """mid_block.attentions.0.key.weight""", """query""": """mid_block.attentions.0.query.weight""", """value""": """mid_block.attentions.0.value.weight""", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , attention_paths_to_split=snake_case__ , config=snake_case__ ) for i in range(snake_case__ ): _snake_case : Optional[Any] = i // (config["""num_res_blocks"""] + 1) _snake_case : Dict = i % (config["""num_res_blocks"""] + 1) _snake_case : List[str] = [shave_segments(snake_case__ , 2 ) for name in output_blocks[i]] _snake_case : Any = {} for layer in output_block_layers: _snake_case , _snake_case : Any = layer.split(""".""" )[0], shave_segments(snake_case__ , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(snake_case__ ) else: _snake_case : str = [layer_name] if len(snake_case__ ) > 1: _snake_case : Dict = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key] _snake_case : List[str] = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key] _snake_case : List[Any] = renew_resnet_paths(snake_case__ ) _snake_case : int = 
renew_resnet_paths(snake_case__ ) _snake_case : Optional[Any] = {"""old""": F"output_blocks.{i}.0", """new""": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _snake_case : str = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] ) _snake_case : Any = checkpoint[ F"output_blocks.{i}.{index}.conv.weight" ] _snake_case : Optional[int] = checkpoint[ F"output_blocks.{i}.{index}.conv.bias" ] # Clear attentions as they have been attributed above. if len(snake_case__ ) == 2: _snake_case : Any = [] if len(snake_case__ ): _snake_case : str = renew_attention_paths(snake_case__ ) _snake_case : str = { """old""": F"output_blocks.{i}.1", """new""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}", } _snake_case : int = { F"output_blocks.{i}.1.qkv.bias": { """key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", """query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", """value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, F"output_blocks.{i}.1.qkv.weight": { """key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", """query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", """value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=snake_case__ , ) else: _snake_case : Optional[Any] = renew_resnet_paths(snake_case__ , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _snake_case : Optional[Any] = """.""".join(["""output_blocks""", str(snake_case__ ), path["""old"""]] ) _snake_case : Optional[int] = """.""".join(["""up_blocks""", str(snake_case__ ), """resnets""", str(snake_case__ ), path["""new"""]] ) _snake_case : Any = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": A_ = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') A_ = parser.parse_args() A_ = torch.load(args.checkpoint_path) with open(args.config_file) as f: A_ = json.loads(f.read()) A_ = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] A_ = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: A_ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) A_ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) A_ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
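# Hypothetical invocation sketch for the conversion script above (the filename
# is illustrative; the three required flags are exactly the ones declared in
# the argparse block):
#   python convert_ldm_original_checkpoint_to_diffusers.py \
#       --checkpoint_path ./ldm/model.ckpt \
#       --config_file ./ldm/config.json \
#       --dump_path ./converted_model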
"""simple docstring""" from __future__ import annotations class lowercase: '''simple docstring''' def __init__( self: Optional[int], a_: int = 0 ): '''simple docstring''' _snake_case : str = key def UpperCamelCase_ ( self: Tuple, a_: str, a_: int ): '''simple docstring''' assert isinstance(a_, a_ ) and isinstance(a_, a_ ) _snake_case : Optional[Any] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(a_ ) ^ key ) for ch in content] def UpperCamelCase_ ( self: Union[str, Any], a_: str, a_: int ): '''simple docstring''' assert isinstance(a_, a_ ) and isinstance(a_, a_ ) _snake_case : Dict = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(a_ ) ^ key ) for ch in content] def UpperCamelCase_ ( self: Optional[int], a_: str, a_: int = 0 ): '''simple docstring''' assert isinstance(a_, a_ ) and isinstance(a_, a_ ) _snake_case : List[Any] = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned _snake_case : List[str] = """""" for ch in content: ans += chr(ord(a_ ) ^ key ) return ans def UpperCamelCase_ ( self: Optional[int], a_: str, a_: int = 0 ): '''simple docstring''' assert isinstance(a_, a_ ) and isinstance(a_, a_ ) _snake_case : Any = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned _snake_case : str = """""" for ch in content: ans += chr(ord(a_ ) ^ key ) return ans def UpperCamelCase_ ( self: Dict, a_: str, a_: int = 0 ): '''simple docstring''' assert isinstance(a_, a_ ) and isinstance(a_, a_ ) try: with open(a_ ) as fin, open("""encrypt.out""", """w+""" ) as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(a_, a_ ) ) except OSError: return False return True def UpperCamelCase_ ( self: Union[str, Any], a_: str, a_: int ): '''simple docstring''' assert isinstance(a_, a_ ) and isinstance(a_, a_ ) try: with open(a_ ) as fin, open("""decrypt.out""", """w+""" ) as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(a_, a_ ) ) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
"""simple docstring""" from typing import Any def UpperCAmelCase__ (snake_case__ : list ): """simple docstring""" if not input_list: return [] _snake_case : List[Any] = [input_list.count(snake_case__ ) for value in input_list] _snake_case : Optional[int] = max(snake_case__ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(snake_case__ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors A_ = logging.getLogger(__name__) class lowercase( __a ): '''simple docstring''' lowercase__ = "sequence-classification" def __init__( self: Union[str, Any], a_: Any ): '''simple docstring''' if type(a_ ) == dict: _snake_case : Dict = Namespace(**a_ ) _snake_case : List[Any] = glue_output_modes[hparams.task] _snake_case : Optional[Any] = glue_tasks_num_labels[hparams.task] super().__init__(a_, a_, self.mode ) def UpperCamelCase_ ( self: List[Any], **a_: Optional[Any] ): '''simple docstring''' return self.model(**a_ ) def UpperCamelCase_ ( self: Optional[int], a_: List[Any], a_: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: _snake_case : Optional[int] = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None _snake_case : Union[str, Any] = self(**a_ ) _snake_case : List[str] = outputs[0] _snake_case : Tuple = self.trainer.lr_schedulers[0]["""scheduler"""] _snake_case : List[Any] = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Dict = self.hparams _snake_case : List[str] = processors[args.task]() _snake_case : Optional[Any] = processor.get_labels() for mode in ["train", "dev"]: _snake_case : Union[str, Any] = self._feature_file(a_ ) if os.path.exists(a_ ) and not args.overwrite_cache: logger.info("""Loading features from cached file %s""", a_ ) else: logger.info("""Creating features from dataset file at %s""", args.data_dir ) _snake_case : Any = ( processor.get_dev_examples(args.data_dir ) if mode == """dev""" else processor.get_train_examples(args.data_dir ) ) _snake_case : str = convert_examples_to_features( a_, self.tokenizer, max_length=args.max_seq_length, label_list=self.labels, output_mode=args.glue_output_mode, ) logger.info("""Saving features into cached file %s""", a_ ) torch.save(a_, a_ ) def UpperCamelCase_ ( self: str, a_: str, a_: int, a_: bool = False ): '''simple docstring''' _snake_case : Optional[int] = """dev""" if mode == """test""" else mode _snake_case : str = self._feature_file(a_ ) logger.info("""Loading features from cached file %s""", a_ ) _snake_case : int = torch.load(a_ ) _snake_case : Optional[int] = torch.tensor([f.input_ids for f in features], dtype=torch.long ) _snake_case : List[str] = torch.tensor([f.attention_mask for f in features], dtype=torch.long ) _snake_case : str = torch.tensor([f.token_type_ids for f in features], dtype=torch.long ) if self.hparams.glue_output_mode == "classification": _snake_case : Optional[int] = torch.tensor([f.label for f in features], dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": _snake_case : List[Any] = torch.tensor([f.label for f in features], dtype=torch.float ) return DataLoader( TensorDataset(a_, a_, a_, a_ ), 
batch_size=a_, shuffle=a_, ) def UpperCamelCase_ ( self: Tuple, a_: Dict, a_: List[str] ): '''simple docstring''' _snake_case : List[Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: _snake_case : Union[str, Any] = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None _snake_case : Optional[int] = self(**a_ ) _snake_case , _snake_case : Union[str, Any] = outputs[:2] _snake_case : List[str] = logits.detach().cpu().numpy() _snake_case : Union[str, Any] = inputs["""labels"""].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def UpperCamelCase_ ( self: Tuple, a_: int ): '''simple docstring''' _snake_case : Optional[Any] = torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item() _snake_case : Tuple = np.concatenate([x["""pred"""] for x in outputs], axis=0 ) if self.hparams.glue_output_mode == "classification": _snake_case : Tuple = np.argmax(a_, axis=1 ) elif self.hparams.glue_output_mode == "regression": _snake_case : Optional[int] = np.squeeze(a_ ) _snake_case : Optional[Any] = np.concatenate([x["""target"""] for x in outputs], axis=0 ) _snake_case : Optional[Any] = [[] for _ in range(out_label_ids.shape[0] )] _snake_case : Tuple = [[] for _ in range(out_label_ids.shape[0] )] _snake_case : Tuple = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task, a_, a_ )} _snake_case : Optional[int] = dict(results.items() ) _snake_case : str = results return ret, preds_list, out_label_list def UpperCamelCase_ ( self: str, a_: list ): '''simple docstring''' _snake_case , _snake_case , _snake_case : Tuple = self._eval_end(a_ ) _snake_case : int = ret["""log"""] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def UpperCamelCase_ ( self: str, a_: Any ): '''simple docstring''' _snake_case , _snake_case , _snake_case : str = self._eval_end(a_ ) _snake_case : List[str] = ret["""log"""] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def UpperCamelCase_ ( a_: Optional[Any], a_: Tuple ): '''simple docstring''' BaseTransformer.add_model_specific_args(a_, a_ ) parser.add_argument( """--max_seq_length""", default=128, type=a_, help=( """The maximum total input sequence length after tokenization. 
Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ), ) parser.add_argument( """--task""", default="""""", type=a_, required=a_, help="""The GLUE task to run""", ) parser.add_argument( """--gpus""", default=0, type=a_, help="""The number of GPUs allocated for this, it is by default 0 meaning none""", ) parser.add_argument( """--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""" ) return parser def UpperCAmelCase__ (): """simple docstring""" _snake_case : Dict = argparse.ArgumentParser() add_generic_args(snake_case__ , os.getcwd() ) _snake_case : Optional[Any] = GLUETransformer.add_model_specific_args(snake_case__ , os.getcwd() ) _snake_case : Tuple = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: _snake_case : int = os.path.join( """./results""" , F"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , ) os.makedirs(args.output_dir ) _snake_case : Optional[Any] = GLUETransformer(snake_case__ ) _snake_case : Union[str, Any] = generic_train(snake_case__ , snake_case__ ) # Optionally, predict on dev set and write to output_dir if args.do_predict: _snake_case : List[Any] = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=snake_case__ ) ) _snake_case : Any = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(snake_case__ ) if __name__ == "__main__": main()
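# Hypothetical invocation sketch for the script above (the filename is
# illustrative; --task, --max_seq_length and --gpus are defined right above,
# while the remaining flags come from add_generic_args/BaseTransformer):
#   python run_pl_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --max_seq_length 128 \
#       --output_dir ./results --do_train --do_predict --gpus 1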
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''', '''BridgeTower/bridgetower-base-itm-mlm''': ( '''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json''' ), } class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower_vision_model" def __init__( self: Tuple, a_: str=768, a_: Union[str, Any]=12, a_: List[str]=3, a_: Optional[int]=16, a_: List[Any]=288, a_: Optional[Any]=1, a_: Any=1E-05, a_: Dict=False, a_: Any=True, a_: int=False, **a_: int, ): '''simple docstring''' super().__init__(**a_ ) _snake_case : str = hidden_size _snake_case : int = num_hidden_layers _snake_case : Any = num_channels _snake_case : Union[str, Any] = patch_size _snake_case : Dict = image_size _snake_case : Optional[Any] = initializer_factor _snake_case : Any = layer_norm_eps _snake_case : int = stop_gradient _snake_case : Any = share_layernorm _snake_case : List[Any] = remove_last_layer @classmethod def UpperCamelCase_ ( cls: Union[str, Any], a_: Union[str, os.PathLike], **a_: Optional[Any] ): '''simple docstring''' _snake_case , _snake_case : List[Any] = cls.get_config_dict(a_, **a_ ) if config_dict.get("""model_type""" ) == "bridgetower": _snake_case : str = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." 
) return cls.from_dict(a_, **a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower_text_model" def __init__( self: str, a_: Dict=50_265, a_: List[Any]=768, a_: Union[str, Any]=12, a_: List[str]=12, a_: str=1, a_: Optional[Any]=3_072, a_: int="gelu", a_: int=0.1, a_: int=0.1, a_: Optional[int]=514, a_: Tuple=1, a_: Tuple=1E-05, a_: Optional[int]=1, a_: Union[str, Any]=0, a_: str=2, a_: Any="absolute", a_: List[Any]=True, **a_: Union[str, Any], ): '''simple docstring''' super().__init__(**a_ ) _snake_case : str = vocab_size _snake_case : Optional[int] = hidden_size _snake_case : Dict = num_hidden_layers _snake_case : Optional[int] = num_attention_heads _snake_case : Optional[int] = hidden_act _snake_case : List[Any] = initializer_factor _snake_case : Optional[int] = intermediate_size _snake_case : int = hidden_dropout_prob _snake_case : Tuple = attention_probs_dropout_prob _snake_case : List[str] = max_position_embeddings _snake_case : Optional[int] = type_vocab_size _snake_case : List[Any] = layer_norm_eps _snake_case : Dict = position_embedding_type _snake_case : Dict = use_cache _snake_case : int = pad_token_id _snake_case : Union[str, Any] = bos_token_id _snake_case : Union[str, Any] = eos_token_id @classmethod def UpperCamelCase_ ( cls: str, a_: Union[str, os.PathLike], **a_: int ): '''simple docstring''' _snake_case , _snake_case : Optional[int] = cls.get_config_dict(a_, **a_ ) if config_dict.get("""model_type""" ) == "bridgetower": _snake_case : Union[str, Any] = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(a_, **a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower" def __init__( self: int, a_: List[str]=True, a_: Any="gelu", a_: List[Any]=768, a_: int=1, a_: Optional[int]=1E-05, a_: Tuple=False, a_: Optional[Any]="add", a_: List[str]=12, a_: Union[str, Any]=6, a_: int=False, a_: Any=False, a_: Dict=None, a_: Any=None, **a_: str, ): '''simple docstring''' _snake_case : str = kwargs.pop("""text_config_dict""", a_ ) _snake_case : Optional[Any] = kwargs.pop("""vision_config_dict""", a_ ) super().__init__(**a_ ) _snake_case : str = share_cross_modal_transformer_layers _snake_case : Any = hidden_act _snake_case : Union[str, Any] = hidden_size _snake_case : Union[str, Any] = initializer_factor _snake_case : Dict = layer_norm_eps _snake_case : Dict = share_link_tower_layers _snake_case : Optional[int] = link_tower_type _snake_case : Any = num_attention_heads _snake_case : int = num_hidden_layers _snake_case : int = tie_word_embeddings _snake_case : Optional[Any] = init_layernorm_from_vision_encoder if text_config is None: _snake_case : Optional[Any] = {} logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" ) if vision_config is None: _snake_case : str = {} logger.info("""`vision_config` is `None`. 
Initializing the `BridgeTowerVisionConfig` with default values.""" ) _snake_case : Any = BridgeTowerTextConfig(**a_ ) _snake_case : List[Any] = BridgeTowerVisionConfig(**a_ ) @classmethod def UpperCamelCase_ ( cls: Union[str, Any], a_: BridgeTowerTextConfig, a_: BridgeTowerVisionConfig, **a_: Optional[Any] ): '''simple docstring''' return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[int] = copy.deepcopy(self.__dict__ ) _snake_case : str = self.text_config.to_dict() _snake_case : List[str] = self.vision_config.to_dict() _snake_case : Tuple = self.__class__.model_type return output
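# Sketch of the equivalent public transformers API (available in recent
# releases). The classmethod shown above appears to correspond to
# `from_text_vision_configs`; treat that name as an assumption, and the
# overridden values as illustrative:
from transformers import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig

text_cfg = BridgeTowerTextConfig(num_hidden_layers=6)
vision_cfg = BridgeTowerVisionConfig(num_hidden_layers=6)
cfg = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)
print(cfg.model_type)  # "bridgetower"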
"""simple docstring""" from __future__ import annotations import queue class lowercase: '''simple docstring''' def __init__( self: List[str], a_: Optional[Any] ): '''simple docstring''' _snake_case : Union[str, Any] = data _snake_case : List[Any] = None _snake_case : int = None def UpperCAmelCase__ (): """simple docstring""" print("""\n********Press N to stop entering at any point of time********\n""" ) _snake_case : Union[str, Any] = input("""Enter the value of the root node: """ ).strip().lower() _snake_case : queue.Queue = queue.Queue() _snake_case : List[str] = TreeNode(int(snake_case__ ) ) q.put(snake_case__ ) while not q.empty(): _snake_case : Union[str, Any] = q.get() _snake_case : str = F"Enter the left node of {node_found.data}: " _snake_case : int = input(snake_case__ ).strip().lower() or """n""" if check == "n": return tree_node _snake_case : Optional[Any] = TreeNode(int(snake_case__ ) ) _snake_case : List[Any] = left_node q.put(snake_case__ ) _snake_case : List[str] = F"Enter the right node of {node_found.data}: " _snake_case : Optional[Any] = input(snake_case__ ).strip().lower() or """n""" if check == "n": return tree_node _snake_case : List[Any] = TreeNode(int(snake_case__ ) ) _snake_case : Union[str, Any] = right_node q.put(snake_case__ ) raise def UpperCAmelCase__ (snake_case__ : TreeNode ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ) or not node: return print(node.data , end=""",""" ) pre_order(node.left ) pre_order(node.right ) def UpperCAmelCase__ (snake_case__ : TreeNode ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ) or not node: return in_order(node.left ) print(node.data , end=""",""" ) in_order(node.right ) def UpperCAmelCase__ (snake_case__ : TreeNode ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=""",""" ) def UpperCAmelCase__ (snake_case__ : TreeNode ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ) or not node: return _snake_case : queue.Queue = queue.Queue() q.put(snake_case__ ) while not q.empty(): _snake_case : int = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def UpperCAmelCase__ (snake_case__ : TreeNode ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ) or not node: return _snake_case : queue.Queue = queue.Queue() q.put(snake_case__ ) while not q.empty(): _snake_case : Any = [] while not q.empty(): _snake_case : Dict = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(snake_case__ ) def UpperCAmelCase__ (snake_case__ : TreeNode ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ) or not node: return _snake_case : list[TreeNode] = [] _snake_case : List[str] = node while n or stack: while n: # start from root node, find its left child print(n.data , end=""",""" ) stack.append(snake_case__ ) _snake_case : List[Any] = n.left # end of while means current node doesn't have left child _snake_case : Optional[int] = stack.pop() # start to traverse its right child _snake_case : Optional[int] = n.right def UpperCAmelCase__ (snake_case__ : TreeNode ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ) or not node: return _snake_case : list[TreeNode] = [] _snake_case 
: Dict = node while n or stack: while n: stack.append(snake_case__ ) _snake_case : Tuple = n.left _snake_case : List[str] = stack.pop() print(n.data , end=""",""" ) _snake_case : Dict = n.right def UpperCAmelCase__ (snake_case__ : TreeNode ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ) or not node: return _snake_case , _snake_case : Union[str, Any] = [], [] _snake_case : List[Any] = node stacka.append(snake_case__ ) while stacka: # to find the reversed order of post order, store it in stack2 _snake_case : List[Any] = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(snake_case__ ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=""",""" ) def UpperCAmelCase__ (snake_case__ : str = "" , snake_case__ : List[str]=50 , snake_case__ : str="*" ): """simple docstring""" if not s: return "\n" + width * char _snake_case , _snake_case : Any = divmod(width - len(snake_case__ ) - 2 , 2 ) return F"{left * char} {s} {(left + extra) * char}" if __name__ == "__main__": import doctest doctest.testmod() print(prompt('''Binary Tree Traversals''')) A_ = build_tree() print(prompt('''Pre Order Traversal''')) pre_order(node) print(prompt() + '''\n''') print(prompt('''In Order Traversal''')) in_order(node) print(prompt() + '''\n''') print(prompt('''Post Order Traversal''')) post_order(node) print(prompt() + '''\n''') print(prompt('''Level Order Traversal''')) level_order(node) print(prompt() + '''\n''') print(prompt('''Actual Level Order Traversal''')) level_order_actual(node) print('''*''' * 50 + '''\n''') print(prompt('''Pre Order Traversal - Iteration Version''')) pre_order_iter(node) print(prompt() + '''\n''') print(prompt('''In Order Traversal - Iteration Version''')) in_order_iter(node) print(prompt() + '''\n''') print(prompt('''Post Order Traversal - Iteration Version''')) post_order_iter(node) print(prompt())
"""simple docstring""" import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def UpperCAmelCase__ (): """simple docstring""" _snake_case : Optional[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png""" _snake_case : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("""RGB""" ) return image def UpperCAmelCase__ (snake_case__ : Any ): """simple docstring""" _snake_case : str = [] # fmt: off # vision encoder rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") ) rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") ) rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") ) rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) ) rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") ) rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") ) # QFormer rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") ) rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") ) # fmt: on return rename_keys def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Tuple ): """simple docstring""" _snake_case : Optional[Any] = dct.pop(snake_case__ ) _snake_case : Optional[int] = val def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : str ): """simple docstring""" for i in range(config.vision_config.num_hidden_layers ): 
# read in original q and v biases _snake_case : Optional[int] = state_dict.pop(F"visual_encoder.blocks.{i}.attn.q_bias" ) _snake_case : Tuple = state_dict.pop(F"visual_encoder.blocks.{i}.attn.v_bias" ) # next, set bias in the state dict _snake_case : List[str] = torch.cat((q_bias, torch.zeros_like(snake_case__ , requires_grad=snake_case__ ), v_bias) ) _snake_case : Dict = qkv_bias def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Union[str, Any] ): """simple docstring""" _snake_case : List[Any] = 3_64 if """coco""" in model_name else 2_24 _snake_case : List[str] = BlipaVisionConfig(image_size=snake_case__ ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: _snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=snake_case__ ).to_dict() elif "opt-6.7b" in model_name: _snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=snake_case__ ).to_dict() elif "t5-xl" in model_name: _snake_case : Tuple = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: _snake_case : List[Any] = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() _snake_case : int = BlipaConfig(vision_config=snake_case__ , text_config=snake_case__ ) return config, image_size @torch.no_grad() def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int=None , snake_case__ : str=False ): """simple docstring""" _snake_case : List[str] = ( AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" ) if """opt""" in model_name else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" ) ) _snake_case : str = tokenizer("""\n""" , add_special_tokens=snake_case__ ).input_ids[0] _snake_case , _snake_case : Dict = get_blipa_config(snake_case__ , eos_token_id=snake_case__ ) _snake_case : str = BlipaForConditionalGeneration(snake_case__ ).eval() _snake_case : int = { """blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""), """blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""), """blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""), """blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""), """blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""), """blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""), """blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""), } _snake_case , _snake_case : List[Any] = model_name_to_original[model_name] # load original model print("""Loading original model...""" ) _snake_case : int = """cuda""" if torch.cuda.is_available() else """cpu""" _snake_case , _snake_case , _snake_case : Any = load_model_and_preprocess( name=snake_case__ , model_type=snake_case__ , is_eval=snake_case__ , device=snake_case__ ) original_model.eval() print("""Done!""" ) # update state dict keys _snake_case : Any = original_model.state_dict() _snake_case : Dict = create_rename_keys(snake_case__ ) for src, dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): _snake_case : str = state_dict.pop(snake_case__ ) if key.startswith("""Qformer.bert""" ): _snake_case : str = key.replace("""Qformer.bert""" , """qformer""" ) if "attention.self" in key: _snake_case : Any = key.replace("""self""" , 
"""attention""" ) if "opt_proj" in key: _snake_case : List[str] = key.replace("""opt_proj""" , """language_projection""" ) if "t5_proj" in key: _snake_case : Optional[Any] = key.replace("""t5_proj""" , """language_projection""" ) if key.startswith("""opt""" ): _snake_case : List[Any] = key.replace("""opt""" , """language""" ) if key.startswith("""t5""" ): _snake_case : List[Any] = key.replace("""t5""" , """language""" ) _snake_case : str = val # read in qv biases read_in_q_v_bias(snake_case__ , snake_case__ ) _snake_case , _snake_case : List[str] = hf_model.load_state_dict(snake_case__ , strict=snake_case__ ) assert len(snake_case__ ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] _snake_case : Any = load_demo_image() _snake_case : str = vis_processors["""eval"""](snake_case__ ).unsqueeze(0 ).to(snake_case__ ) _snake_case : List[Any] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(snake_case__ ) # create processor _snake_case : Any = BlipImageProcessor( size={"""height""": image_size, """width""": image_size} , image_mean=snake_case__ , image_std=snake_case__ ) _snake_case : int = BlipaProcessor(image_processor=snake_case__ , tokenizer=snake_case__ ) _snake_case : Any = processor(images=snake_case__ , return_tensors="""pt""" ).pixel_values.to(snake_case__ ) # make sure processor creates exact same pixel values assert torch.allclose(snake_case__ , snake_case__ ) original_model.to(snake_case__ ) hf_model.to(snake_case__ ) with torch.no_grad(): if "opt" in model_name: _snake_case : str = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits _snake_case : int = hf_model(snake_case__ , snake_case__ ).logits else: _snake_case : str = original_model( {"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits _snake_case : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 ) _snake_case : Union[str, Any] = hf_model(snake_case__ , snake_case__ , labels=snake_case__ ).logits assert original_logits.shape == logits.shape print("""First values of original logits:""" , original_logits[0, :3, :3] ) print("""First values of HF logits:""" , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": _snake_case : List[str] = torch.tensor( [[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=snake_case__ ) assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 ) elif model_name == "blip2-flan-t5-xl-coco": _snake_case : Union[str, Any] = torch.tensor( [[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=snake_case__ ) else: # cast to same type _snake_case : int = logits.dtype assert torch.allclose(original_logits.to(snake_case__ ) , snake_case__ , atol=1e-2 ) print("""Looks ok!""" ) print("""Generating a caption...""" ) _snake_case : Any = """""" _snake_case : str = tokenizer(snake_case__ , return_tensors="""pt""" ).input_ids.to(snake_case__ ) _snake_case : Union[str, Any] = original_model.generate({"""image""": original_pixel_values} ) _snake_case : Tuple = hf_model.generate( snake_case__ , snake_case__ , do_sample=snake_case__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print("""Original generation:""" , snake_case__ ) _snake_case : Optional[Any] = input_ids.shape[1] _snake_case : int = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=snake_case__ ) _snake_case : Optional[Any] = 
[text.strip() for text in output_text] print("""HF generation:""" , snake_case__ ) if pytorch_dump_folder_path is not None: processor.save_pretrained(snake_case__ ) hf_model.save_pretrained(snake_case__ ) if push_to_hub: processor.push_to_hub(F"nielsr/{model_name}" ) hf_model.push_to_hub(F"nielsr/{model_name}" ) if __name__ == "__main__": A_ = argparse.ArgumentParser() A_ = [ '''blip2-opt-2.7b''', '''blip2-opt-6.7b''', '''blip2-opt-2.7b-coco''', '''blip2-opt-6.7b-coco''', '''blip2-flan-t5-xl''', '''blip2-flan-t5-xl-coco''', '''blip2-flan-t5-xxl''', ] parser.add_argument( '''--model_name''', default='''blip2-opt-2.7b''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) A_ = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
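# Illustration (not part of the conversion script itself): the rename_key helper
# used above boils down to popping a key out of the state dict and re-inserting
# its value under the new name. A minimal self-contained sketch with a plain dict:
def rename_key_sketch(state_dict: dict, old: str, new: str) -> None:
    state_dict[new] = state_dict.pop(old)


_sketch = {"visual_encoder.cls_token": [0.0, 1.0]}
rename_key_sketch(_sketch, "visual_encoder.cls_token", "vision_model.embeddings.class_embedding")
assert "vision_model.embeddings.class_embedding" in _sketch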
"""simple docstring""" import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration A_ = 5_00_00 A_ = 50_00 A_ , A_ = os.path.split(__file__) A_ = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json''')) @get_duration def UpperCAmelCase__ (snake_case__ : datasets.Dataset , snake_case__ : Dict ): """simple docstring""" for i in range(snake_case__ ): _snake_case : Dict = dataset[i] @get_duration def UpperCAmelCase__ (snake_case__ : datasets.Dataset , snake_case__ : Optional[Any] , snake_case__ : str ): """simple docstring""" for i in range(0 , len(snake_case__ ) , snake_case__ ): _snake_case : Tuple = dataset[i : i + batch_size] @get_duration def UpperCAmelCase__ (snake_case__ : datasets.Dataset , snake_case__ : str , snake_case__ : int ): """simple docstring""" with dataset.formatted_as(type=snake_case__ ): for i in range(snake_case__ ): _snake_case : Union[str, Any] = dataset[i] @get_duration def UpperCAmelCase__ (snake_case__ : datasets.Dataset , snake_case__ : Any , snake_case__ : int , snake_case__ : Union[str, Any] ): """simple docstring""" with dataset.formatted_as(type=snake_case__ ): for i in range(0 , snake_case__ , snake_case__ ): _snake_case : Optional[Any] = dataset[i : i + batch_size] def UpperCAmelCase__ (): """simple docstring""" _snake_case : Dict = {"""num examples""": SPEED_TEST_N_EXAMPLES} _snake_case : int = [ (read, {"""length""": SMALL_TEST}), (read, {"""length""": SPEED_TEST_N_EXAMPLES}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_00}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10_00}), (read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}), (read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}), (read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}), (read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}), (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}), (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10_00}), ] _snake_case : Optional[Any] = [ (read, {"""length""": SMALL_TEST}), (read, {"""length""": SPEED_TEST_N_EXAMPLES}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_00}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10_00}), (read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}), (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}), (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10_00}), ] with tempfile.TemporaryDirectory() as tmp_dir: print("""generating dataset""" ) _snake_case : Optional[int] = datasets.Features( {"""list""": datasets.Sequence(datasets.Value("""float32""" ) ), """numbers""": datasets.Value("""float32""" )} ) _snake_case : Optional[int] = generate_example_dataset( os.path.join(snake_case__ , """dataset.arrow""" ) , snake_case__ , num_examples=snake_case__ , seq_shapes={"""list""": (1_00,)} , ) print("""first set of iterations""" ) for func, kwargs in functions: print(func.__name__ , str(snake_case__ ) ) _snake_case : List[Any] = func(snake_case__ , **snake_case__ ) print("""shuffling dataset""" ) _snake_case : Tuple = dataset.shuffle() 
print("""Second set of iterations (after shuffling""" ) for func, kwargs in functions_shuffled: print("""shuffled """ , func.__name__ , str(snake_case__ ) ) _snake_case : List[Any] = func( snake_case__ , **snake_case__ ) with open(snake_case__ , """wb""" ) as f: f.write(json.dumps(snake_case__ ).encode("""utf-8""" ) ) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
"""simple docstring""" import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() A_ = logging.get_logger(__name__) def UpperCAmelCase__ (snake_case__ : Optional[int] ): """simple docstring""" print("""Loading config file...""" ) def flatten_yaml_as_dict(snake_case__ : List[Any] , snake_case__ : Optional[Any]="" , snake_case__ : Tuple="." ): _snake_case : Union[str, Any] = [] for k, v in d.items(): _snake_case : List[str] = parent_key + sep + k if parent_key else k if isinstance(snake_case__ , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(snake_case__ , snake_case__ , sep=snake_case__ ).items() ) else: items.append((new_key, v) ) return dict(snake_case__ ) _snake_case : Dict = argparse.Namespace() with open(snake_case__ , """r""" ) as yaml_file: try: _snake_case : List[Any] = yaml.load(snake_case__ , Loader=yaml.FullLoader ) _snake_case : Any = flatten_yaml_as_dict(snake_case__ ) for k, v in flat_cfg.items(): setattr(snake_case__ , snake_case__ , snake_case__ ) except yaml.YAMLError as exc: logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case__ , str(snake_case__ ) ) ) return config def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ): """simple docstring""" _snake_case : Dict = MobileViTVaConfig() _snake_case : Optional[int] = False # dataset if task_name.startswith("""imagenet1k_""" ): _snake_case : Dict = 10_00 if int(task_name.strip().split("""_""" )[-1] ) == 3_84: _snake_case : Union[str, Any] = 3_84 else: _snake_case : Optional[Any] = 2_56 _snake_case : str = """imagenet-1k-id2label.json""" elif task_name.startswith("""imagenet21k_to_1k_""" ): _snake_case : str = 2_10_00 if int(task_name.strip().split("""_""" )[-1] ) == 3_84: _snake_case : Dict = 3_84 else: _snake_case : Union[str, Any] = 2_56 _snake_case : Tuple = """imagenet-22k-id2label.json""" elif task_name.startswith("""ade20k_""" ): _snake_case : Tuple = 1_51 _snake_case : str = 5_12 _snake_case : List[Any] = """ade20k-id2label.json""" _snake_case : Union[str, Any] = True elif task_name.startswith("""voc_""" ): _snake_case : List[Any] = 21 _snake_case : List[str] = 5_12 _snake_case : int = """pascal-voc-id2label.json""" _snake_case : int = True # orig_config _snake_case : int = load_orig_config_file(snake_case__ ) assert getattr(snake_case__ , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model" _snake_case : str = getattr(snake_case__ , """model.classification.mitv2.width_multiplier""" , 1.0 ) assert ( getattr(snake_case__ , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" _snake_case : int = getattr(snake_case__ , """model.classification.activation.name""" , """swish""" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: _snake_case : Tuple = getattr(snake_case__ , """model.segmentation.output_stride""" , 16 ) if "_deeplabv3" in task_name: _snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] ) _snake_case : Tuple = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 ) _snake_case : Any = 
getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 ) # id2label _snake_case : Union[str, Any] = """huggingface/label-files""" _snake_case : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) ) _snake_case : List[Any] = {int(snake_case__ ): v for k, v in idalabel.items()} _snake_case : Tuple = idalabel _snake_case : Any = {v: k for k, v in idalabel.items()} return config def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[Any] ): """simple docstring""" _snake_case : List[str] = dct.pop(snake_case__ ) _snake_case : List[Any] = val def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : int=False ): """simple docstring""" if base_model: _snake_case : Any = """""" else: _snake_case : Union[str, Any] = """mobilevitv2.""" _snake_case : Dict = [] for k in state_dict.keys(): if k[:8] == "encoder.": _snake_case : List[str] = k[8:] else: _snake_case : str = k if ".block." in k: _snake_case : Optional[int] = k_new.replace(""".block.""" , """.""" ) if ".conv." in k: _snake_case : Union[str, Any] = k_new.replace(""".conv.""" , """.convolution.""" ) if ".norm." in k: _snake_case : str = k_new.replace(""".norm.""" , """.normalization.""" ) if "conv_1." in k: _snake_case : int = k_new.replace("""conv_1.""" , F"{model_prefix}conv_stem." ) for i in [1, 2]: if F"layer_{i}." in k: _snake_case : Tuple = k_new.replace(F"layer_{i}." , F"{model_prefix}encoder.layer.{i-1}.layer." ) if ".exp_1x1." in k: _snake_case : Optional[Any] = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" ) if ".red_1x1." in k: _snake_case : Optional[Any] = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" ) for i in [3, 4, 5]: if F"layer_{i}.0." in k: _snake_case : Tuple = k_new.replace(F"layer_{i}.0." , F"{model_prefix}encoder.layer.{i-1}.downsampling_layer." ) if F"layer_{i}.1.local_rep.0." in k: _snake_case : Any = k_new.replace(F"layer_{i}.1.local_rep.0." , F"{model_prefix}encoder.layer.{i-1}.conv_kxk." ) if F"layer_{i}.1.local_rep.1." in k: _snake_case : str = k_new.replace(F"layer_{i}.1.local_rep.1." , F"{model_prefix}encoder.layer.{i-1}.conv_1x1." ) for i in [3, 4, 5]: if i == 3: _snake_case : Optional[Any] = [0, 1] elif i == 4: _snake_case : Any = [0, 1, 2, 3] elif i == 5: _snake_case : List[Any] = [0, 1, 2] for j in j_in: if F"layer_{i}.1.global_rep.{j}." in k: _snake_case : Any = k_new.replace( F"layer_{i}.1.global_rep.{j}." , F"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." ) if F"layer_{i}.1.global_rep.{j+1}." in k: _snake_case : List[Any] = k_new.replace( F"layer_{i}.1.global_rep.{j+1}." , F"{model_prefix}encoder.layer.{i-1}.layernorm." ) if F"layer_{i}.1.conv_proj." in k: _snake_case : Union[str, Any] = k_new.replace(F"layer_{i}.1.conv_proj." , F"{model_prefix}encoder.layer.{i-1}.conv_projection." ) if "pre_norm_attn.0." in k: _snake_case : List[Any] = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" ) if "pre_norm_attn.1." in k: _snake_case : Optional[int] = k_new.replace("""pre_norm_attn.1.""" , """attention.""" ) if "pre_norm_ffn.0." in k: _snake_case : List[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" ) if "pre_norm_ffn.1." in k: _snake_case : Tuple = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" ) if "pre_norm_ffn.3." in k: _snake_case : Any = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" ) if "classifier.1." in k: _snake_case : List[str] = k_new.replace("""classifier.1.""" , """classifier.""" ) if "seg_head." 
in k: _snake_case : str = k_new.replace("""seg_head.""" , """segmentation_head.""" ) if ".aspp_layer." in k: _snake_case : Optional[int] = k_new.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in k: _snake_case : int = k_new.replace(""".aspp_pool.""" , """.""" ) rename_keys.append((k, k_new) ) return rename_keys def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" _snake_case : List[str] = [] for k in state_dict.keys(): if k.startswith("""seg_head.aux_head.""" ): keys_to_ignore.append(snake_case__ ) for k in keys_to_ignore: state_dict.pop(snake_case__ , snake_case__ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg""" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" _snake_case : Any = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) return im @torch.no_grad() def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Tuple ): """simple docstring""" _snake_case : int = get_mobilevitva_config(snake_case__ , snake_case__ ) # load original state_dict _snake_case : Optional[int] = torch.load(snake_case__ , map_location="""cpu""" ) # load huggingface model if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ): _snake_case : Any = MobileViTVaForSemanticSegmentation(snake_case__ ).eval() _snake_case : List[Any] = False else: _snake_case : List[Any] = MobileViTVaForImageClassification(snake_case__ ).eval() _snake_case : Optional[Any] = False # remove and rename some keys of load the original model _snake_case : Union[str, Any] = checkpoint remove_unused_keys(snake_case__ ) _snake_case : List[str] = create_rename_keys(snake_case__ , base_model=snake_case__ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) # load modified state_dict model.load_state_dict(snake_case__ ) # Check outputs on an image, prepared by MobileViTImageProcessor _snake_case : Optional[int] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) _snake_case : List[str] = image_processor(images=prepare_img() , return_tensors="""pt""" ) _snake_case : Optional[Any] = model(**snake_case__ ) # verify classification model if task_name.startswith("""imagenet""" ): _snake_case : List[str] = outputs.logits _snake_case : Any = logits.argmax(-1 ).item() print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] ) if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0: # expected_logits for base variant _snake_case : List[str] = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] ) assert torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) print(F"Saving model {task_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(snake_case__ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(snake_case__ ) if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''imagenet1k_256''', type=str, help=( '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . 
''' ''' Classification (ImageNet-1k) - MobileViTV2 (256x256) : imagenet1k_256 - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384 - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : imagenet21k_to_1k_256 - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on ImageNet-1k 384x384) : imagenet21k_to_1k_384 Segmentation - ADE20K Dataset : ade20k_deeplabv3 - Pascal VOC 2012 Dataset: voc_deeplabv3 ''' ), choices=[ '''imagenet1k_256''', '''imagenet1k_384''', '''imagenet21k_to_1k_256''', '''imagenet21k_to_1k_384''', '''ade20k_deeplabv3''', '''voc_deeplabv3''', ], ) parser.add_argument( '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) A_ = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
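# Illustration: the flatten_yaml_as_dict helper above turns a nested mapping into
# dotted keys, so values like model.classification.mitv2.width_multiplier can be
# looked up on a flat namespace. A tiny self-contained equivalent of that step:
import collections.abc


def flatten_dict(d, parent_key="", sep="."):
    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, collections.abc.MutableMapping):
            items.extend(flatten_dict(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)


assert flatten_dict({"model": {"classification": {"name": "mobilevit_v2"}}}) == {
    "model.classification.name": "mobilevit_v2"
}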
"""simple docstring""" # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers A_ = '''3''' print('''Python version:''', sys.version) print('''transformers version:''', transformers.__version__) try: import torch print('''Torch version:''', torch.__version__) print('''Cuda available:''', torch.cuda.is_available()) print('''Cuda version:''', torch.version.cuda) print('''CuDNN version:''', torch.backends.cudnn.version()) print('''Number of GPUs available:''', torch.cuda.device_count()) print('''NCCL version:''', torch.cuda.nccl.version()) except ImportError: print('''Torch version:''', None) try: import deepspeed print('''DeepSpeed version:''', deepspeed.__version__) except ImportError: print('''DeepSpeed version:''', None) try: import tensorflow as tf print('''TensorFlow version:''', tf.__version__) print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU'''))) print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU'''))) except ImportError: print('''TensorFlow version:''', None)
"""simple docstring""" import os import sys import unittest A_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path A_ = os.path.join(git_repo_path, '''src''', '''diffusers''') class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Optional[int] = find_backend(""" if not is_torch_available():""" ) self.assertEqual(a_, """torch""" ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") _snake_case : Any = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" ) self.assertEqual(a_, """torch_and_transformers""" ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") _snake_case : Union[str, Any] = find_backend( """ if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" ) self.assertEqual(a_, """torch_and_transformers_and_onnx""" ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Dict = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("""torch""", a_ ) self.assertIn("""torch_and_transformers""", a_ ) self.assertIn("""flax_and_transformers""", a_ ) self.assertIn("""torch_and_transformers_and_onnx""", a_ ) # Likewise, we can't assert on the exact content of a key self.assertIn("""UNet2DModel""", objects["""torch"""] ) self.assertIn("""FlaxUNet2DConditionModel""", objects["""flax"""] ) self.assertIn("""StableDiffusionPipeline""", objects["""torch_and_transformers"""] ) self.assertIn("""FlaxStableDiffusionPipeline""", objects["""flax_and_transformers"""] ) self.assertIn("""LMSDiscreteScheduler""", objects["""torch_and_scipy"""] ) self.assertIn("""OnnxStableDiffusionPipeline""", objects["""torch_and_transformers_and_onnx"""] ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[Any] = create_dummy_object("""CONSTANT""", """'torch'""" ) self.assertEqual(a_, """\nCONSTANT = None\n""" ) _snake_case : Optional[int] = create_dummy_object("""function""", """'torch'""" ) self.assertEqual( a_, """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" ) _snake_case : List[Any] = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, 'torch') @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, 'torch') """ _snake_case : Union[str, Any] = create_dummy_object("""FakeClass""", """'torch'""" ) self.assertEqual(a_, a_ ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Union[str, Any] = """# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, [\"torch\"]) class FakeClass(metaclass=DummyObject): _backends = [\"torch\"] def __init__(self, *args, **kwargs): requires_backends(self, [\"torch\"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) """ _snake_case : List[Any] = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} ) self.assertEqual(dummy_files["""torch"""], a_ )
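# Illustration: find_backend (imported from check_dummies above) maps a line like
# "if not (is_torch_available() and is_transformers_available()):" to the string
# "torch_and_transformers". A simplified regex-based sketch of that behavior
# (an approximation, not the utility's actual source):
import re

_re_backend = re.compile(r"is_(\w+)_available\(\)")


def find_backend_sketch(line: str):
    if "is_" not in line:
        return None
    backends = _re_backend.findall(line)
    return "_and_".join(backends) if backends else None


assert (
    find_backend_sketch("    if not (is_torch_available() and is_transformers_available()):")
    == "torch_and_transformers"
)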
"""simple docstring""" from random import randint, random def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : int = 5 , ): """simple docstring""" _snake_case : Dict = [[-1] * number_of_cells] # Create a highway without any car _snake_case : List[Any] = 0 _snake_case : Any = max(snake_case__ , 0 ) while i < number_of_cells: _snake_case : Optional[int] = ( randint(0 , snake_case__ ) if random_speed else initial_speed ) # Place the cars i += ( randint(1 , max_speed * 2 ) if random_frequency else frequency ) # Arbitrary number, may need tuning return highway def UpperCAmelCase__ (snake_case__ : list , snake_case__ : int ): """simple docstring""" _snake_case : Optional[int] = 0 _snake_case : Dict = highway_now[car_index + 1 :] for cell in range(len(snake_case__ ) ): # May need a better name for this if cells[cell] != -1: # If the cell is not empty then return distance # we have the distance we wanted distance += 1 # Here if the car is near the end of the highway return distance + get_distance(snake_case__ , -1 ) def UpperCAmelCase__ (snake_case__ : list , snake_case__ : float , snake_case__ : int ): """simple docstring""" _snake_case : Tuple = len(snake_case__ ) # Beforce calculations, the highway is empty _snake_case : Optional[int] = [-1] * number_of_cells for car_index in range(snake_case__ ): if highway_now[car_index] != -1: # Add 1 to the current speed of the car and cap the speed _snake_case : Tuple = min(highway_now[car_index] + 1 , snake_case__ ) # Number of empty cell before the next car _snake_case : Union[str, Any] = get_distance(snake_case__ , snake_case__ ) - 1 # We can't have the car causing an accident _snake_case : List[Any] = min(next_highway[car_index] , snake_case__ ) if random() < probability: # Randomly, a driver will slow down _snake_case : int = max(next_highway[car_index] - 1 , 0 ) return next_highway def UpperCAmelCase__ (snake_case__ : list , snake_case__ : int , snake_case__ : float , snake_case__ : int ): """simple docstring""" _snake_case : str = len(highway[0] ) for i in range(snake_case__ ): _snake_case : List[Any] = update(highway[i] , snake_case__ , snake_case__ ) _snake_case : Optional[int] = [-1] * number_of_cells for car_index in range(snake_case__ ): _snake_case : List[Any] = next_speeds_calculated[car_index] if speed != -1: # Change the position based on the speed (with % to create the loop) _snake_case : Dict = (car_index + speed) % number_of_cells # Commit the change of position _snake_case : Optional[Any] = speed highway.append(snake_case__ ) return highway if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) A_ = { '''configuration_owlvit''': [ '''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OwlViTConfig''', '''OwlViTOnnxConfig''', '''OwlViTTextConfig''', '''OwlViTVisionConfig''', ], '''processing_owlvit''': ['''OwlViTProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = ['''OwlViTFeatureExtractor'''] A_ = ['''OwlViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OwlViTModel''', '''OwlViTPreTrainedModel''', '''OwlViTTextModel''', '''OwlViTVisionModel''', '''OwlViTForObjectDetection''', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : int = 10_00 ): """simple docstring""" return sum(e for e in range(3 , snake_case__ ) if e % 3 == 0 or e % 5 == 0 ) if __name__ == "__main__": print(F'''{solution() = }''')
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml A_ = logging.get_logger(__name__) def UpperCAmelCase__ (snake_case__ : bool , snake_case__ : bool ): """simple docstring""" def run_func(snake_case__ : Tuple ): @wraps(snake_case__ ) def run_in_eager_mode(*snake_case__ : str , **snake_case__ : Any ): return func(*snake_case__ , **snake_case__ ) @wraps(snake_case__ ) @tf.function(experimental_compile=snake_case__ ) def run_in_graph_mode(*snake_case__ : Any , **snake_case__ : Optional[int] ): return func(*snake_case__ , **snake_case__ ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int , snake_case__ : int ): """simple docstring""" _snake_case : List[str] = random.Random() _snake_case : Optional[int] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(snake_case__ , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class lowercase( __a ): '''simple docstring''' lowercase__ = 42 lowercase__ = 42 lowercase__ = "TensorFlow" @property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' return tf.__version__ def UpperCamelCase_ ( self: List[str], a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : List[str] = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : Optional[int] = self._prepare_inference_func(a_, a_, a_ ) return self._measure_speed(_inference ) def UpperCamelCase_ ( self: int, a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : Tuple = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : Optional[Any] = self._prepare_train_func(a_, a_, a_ ) return self._measure_speed(_train ) def UpperCamelCase_ ( self: Dict, a_: str, a_: int, a_: int ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ ) _snake_case : str = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : List[str] = self._prepare_inference_func(a_, a_, a_ ) return self._measure_memory(_inference ) def UpperCamelCase_ ( self: Tuple, a_: str, a_: int, a_: int ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ ) _snake_case : Dict = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _snake_case : Optional[int] = self._prepare_train_func(a_, 
a_, a_ ) return self._measure_memory(_train ) def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : List[Any] = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _snake_case : List[Any] = ( hasattr(a_, """architectures""" ) and isinstance(config.architectures, a_ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _snake_case : str = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _snake_case : List[Any] = __import__("""transformers""", fromlist=[model_class] ) _snake_case : Dict = getattr(a_, a_ ) _snake_case : Any = model_cls(a_ ) except ImportError: raise ImportError( f"{model_class} does not exist. If you just want to test the pretrained model, you might want to" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _snake_case : Any = TF_MODEL_MAPPING[config.__class__](a_ ) # encoder-decoder has vocab size saved differently _snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size _snake_case : List[str] = random_input_ids(a_, a_, a_ ) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_decoder_forward(): return model(a_, decoder_input_ids=a_, training=a_ ) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_forward(): return model(a_, training=a_ ) _snake_case : Optional[int] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def UpperCamelCase_ ( self: Optional[int], a_: str, a_: int, a_: int ): '''simple docstring''' _snake_case : str = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _snake_case : Tuple = ( hasattr(a_, """architectures""" ) and isinstance(config.architectures, a_ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _snake_case : List[str] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _snake_case : str = __import__("""transformers""", fromlist=[model_class] ) _snake_case : Tuple = getattr(a_, a_ ) _snake_case : Any = model_cls(a_ ) except ImportError: raise ImportError( f"{model_class} does not exist. 
If you just want to test the pretrained model, you might want to" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _snake_case : Optional[Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](a_ ) # encoder-decoder has vocab size saved differently _snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size _snake_case : int = random_input_ids(a_, a_, a_ ) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_decoder_train(): _snake_case : Dict = model(a_, decoder_input_ids=a_, labels=a_, training=a_ )[0] _snake_case : str = tf.gradients(a_, model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla ) def encoder_train(): _snake_case : Optional[Any] = model(a_, labels=a_, training=a_ )[0] _snake_case : Optional[Any] = tf.gradients(a_, model.trainable_variables ) return gradients _snake_case : int = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def UpperCamelCase_ ( self: Union[str, Any], a_: str ): '''simple docstring''' with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(a_, repeat=1, number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _snake_case : Dict = timeit.repeat( a_, repeat=self.args.repeat, number=10, ) return min(a_ ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f"Doesn't fit on GPU. {e}" ) def UpperCamelCase_ ( self: Optional[Any], a_: Callable[[], None] ): '''simple docstring''' logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) _snake_case : List[Any] = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) _snake_case : Optional[Any] = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() _snake_case : List[str] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) _snake_case : Tuple = nvml.nvmlDeviceGetMemoryInfo(a_ ) _snake_case : List[str] = meminfo.used _snake_case : Any = Memory(a_ ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) _snake_case : List[Any] = None else: _snake_case : int = measure_peak_memory_cpu(a_ ) _snake_case : List[str] = Memory(a_ ) if isinstance(a_, a_ ) else memory_bytes if self.args.trace_memory_line_by_line: _snake_case : Tuple = stop_memory_tracing(a_ ) if memory is None: _snake_case : int = summary.total else: _snake_case : int = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f"Doesn't fit on GPU. {e}" ) return "N/A", None
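# Illustration: run_with_tf_optimizations above dispatches between an eager call
# and a tf.function-compiled (optionally XLA) call. A minimal standalone sketch
# of that dispatch, assuming TF >= 2.4 where `jit_compile` superseded the
# `experimental_compile` flag used in the module above:
import tensorflow as tf


def run_with_tf_optimizations_sketch(do_eager_mode: bool, use_xla: bool):
    def deco(func):
        if do_eager_mode:
            return func
        return tf.function(func, jit_compile=use_xla)

    return deco


@run_with_tf_optimizations_sketch(do_eager_mode=False, use_xla=False)
def _square(x):
    return tf.matmul(x, x)


print(_square(tf.eye(2)))  # traced once, then runs as a graph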
"""simple docstring""" from ...processing_utils import ProcessorMixin class lowercase( __a ): '''simple docstring''' lowercase__ = ["image_processor", "feature_extractor"] lowercase__ = "TvltImageProcessor" lowercase__ = "TvltFeatureExtractor" def __init__( self: Dict, a_: Union[str, Any], a_: Union[str, Any] ): '''simple docstring''' super().__init__(image_processor=a_, feature_extractor=a_ ) _snake_case : Any = image_processor _snake_case : Dict = feature_extractor def __call__( self: int, a_: str=None, a_: Tuple=None, a_: Dict=None, a_: str=None, a_: Optional[int]=False, a_: Tuple=False, *a_: List[str], **a_: int, ): '''simple docstring''' if images is None and audio is None: raise ValueError("""You need to specify either an `images` or `audio` input to process.""" ) _snake_case : Optional[int] = None if images is not None: _snake_case : Tuple = self.image_processor(a_, mask_pixel=a_, *a_, **a_ ) if images_mixed is not None: _snake_case : Optional[int] = self.image_processor(a_, is_mixed=a_, *a_, **a_ ) if audio is not None: _snake_case : Any = self.feature_extractor( a_, *a_, sampling_rate=a_, mask_audio=a_, **a_ ) _snake_case : List[str] = {} if audio is not None: output_dict.update(a_ ) if images is not None: output_dict.update(a_ ) if images_mixed_dict is not None: output_dict.update(a_ ) return output_dict @property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Dict = self.image_processor.model_input_names _snake_case : List[str] = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : str ): """simple docstring""" _snake_case : str = int(snake_case__ ) # Initialize Result _snake_case : str = [] # Traverse through all denomination for denomination in reversed(snake_case__ ): # Find denominations while int(snake_case__ ) >= int(snake_case__ ): total_value -= int(snake_case__ ) answer.append(snake_case__ ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": A_ = [] A_ = '''0''' if ( input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower() == "y" ): A_ = int(input('''Enter the number of denominations you want to add: ''').strip()) for i in range(0, n): denominations.append(int(input(F'''Denomination {i}: ''').strip())) A_ = input('''Enter the change you want to make in Indian Currency: ''').strip() else: # All denominations of Indian Currency if user does not enter A_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00] A_ = input('''Enter the change you want to make: ''').strip() if int(value) == 0 or int(value) < 0: print('''The total value cannot be zero or negative.''') else: print(F'''Following is minimal change for {value}: ''') A_ = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=''' ''')
"""simple docstring""" import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process A_ = logging.getLogger(__name__) def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : List[Any] ): """simple docstring""" return (preds == labels).mean() @dataclass class lowercase: '''simple docstring''' lowercase__ = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) lowercase__ = field( default=__a , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) lowercase__ = field( default=__a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) lowercase__ = field( default=__a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class lowercase: '''simple docstring''' lowercase__ = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} ) lowercase__ = field(metadata={"help": "Should contain the data files for the task."} ) lowercase__ = field( default=1_28 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) lowercase__ = field( default=__a , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) _snake_case , _snake_case , _snake_case : List[Any] = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. Use" """ --overwrite_output_dir to overcome.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" , snake_case__ ) # Set seed set_seed(training_args.seed ) try: _snake_case : int = processors[data_args.task_name]() _snake_case : Any = processor.get_labels() _snake_case : int = len(snake_case__ ) except KeyError: raise ValueError("""Task not found: %s""" % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_snake_case : List[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=snake_case__ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) _snake_case : List[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _snake_case : Optional[int] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=snake_case__ , cache_dir=model_args.cache_dir , ) # Get datasets _snake_case : List[str] = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=snake_case__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) _snake_case : Dict = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=snake_case__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(snake_case__ : EvalPrediction ) -> Dict: _snake_case : List[Any] = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(snake_case__ , p.label_ids )} # Data collator _snake_case : Any = DataCollatorWithPadding(snake_case__ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer _snake_case : int = Trainer( model=snake_case__ , args=snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , compute_metrics=snake_case__ , data_collator=snake_case__ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _snake_case : Optional[int] = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) _snake_case : Optional[int] = trainer.evaluate() _snake_case : int = os.path.join(training_args.output_dir , """eval_results.txt""" ) if trainer.is_world_master(): with open(snake_case__ , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key, value in result.items(): logger.info(""" %s = %s""" , snake_case__ , snake_case__ ) writer.write("""%s = %s\n""" % (key, value) ) results.update(snake_case__ ) return results def UpperCAmelCase__ (snake_case__ : List[Any] ): """simple docstring""" main() if __name__ == "__main__": main()
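# Illustration: the simple_accuracy metric wired into compute_metrics above is
# just the mean of elementwise prediction/label agreement, computed here on a
# small made-up array pair:
import numpy as np

preds = np.array([0, 1, 2, 1])
labels = np.array([0, 1, 1, 1])
assert (preds == labels).mean() == 0.75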
"""simple docstring""" import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class lowercase: '''simple docstring''' def __init__( self: Optional[Any], a_: Union[str, Any], a_: int=100, a_: int=13, a_: List[Any]=30, a_: str=2, a_: Optional[Any]=3, a_: Optional[int]=True, a_: Any=True, a_: Optional[Any]=32, a_: Tuple=4, a_: str=4, a_: List[Any]=37, a_: List[str]="gelu", a_: str=0.1, a_: Optional[int]=0.1, a_: Any=10, a_: List[str]=0.02, a_: Dict=3, a_: str=None, a_: Optional[int]=[0, 1, 2, 3], ): '''simple docstring''' _snake_case : Optional[int] = parent _snake_case : Optional[Any] = 100 _snake_case : Any = batch_size _snake_case : List[Any] = image_size _snake_case : Optional[Any] = patch_size _snake_case : str = num_channels _snake_case : Tuple = is_training _snake_case : Tuple = use_labels _snake_case : Any = hidden_size _snake_case : Optional[int] = num_hidden_layers _snake_case : List[str] = num_attention_heads _snake_case : Union[str, Any] = intermediate_size _snake_case : Dict = hidden_act _snake_case : str = hidden_dropout_prob _snake_case : Optional[int] = attention_probs_dropout_prob _snake_case : Optional[Any] = type_sequence_label_size _snake_case : Any = initializer_range _snake_case : List[str] = scope _snake_case : int = out_indices _snake_case : Optional[Any] = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _snake_case : Dict = (image_size // patch_size) ** 2 _snake_case : str = num_patches + 1 def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : List[Any] = None _snake_case : Tuple = None if self.use_labels: _snake_case : str = ids_tensor([self.batch_size], self.type_sequence_label_size ) _snake_case : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) _snake_case : List[str] = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' return BeitConfig( vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=a_, initializer_range=self.initializer_range, out_indices=self.out_indices, ) def UpperCamelCase_ ( self: 
List[Any], a_: List[Any], a_: Any, a_: Optional[Any], a_: List[str] ): '''simple docstring''' _snake_case : str = BeitModel(config=a_ ) model.to(a_ ) model.eval() _snake_case : Dict = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self: str, a_: List[Any], a_: Optional[Any], a_: Optional[int], a_: List[Any] ): '''simple docstring''' _snake_case : List[str] = BeitForMaskedImageModeling(config=a_ ) model.to(a_ ) model.eval() _snake_case : Union[str, Any] = model(a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) ) def UpperCamelCase_ ( self: Any, a_: List[str], a_: Any, a_: List[Any], a_: Optional[Any] ): '''simple docstring''' _snake_case : Any = self.type_sequence_label_size _snake_case : Any = BeitForImageClassification(a_ ) model.to(a_ ) model.eval() _snake_case : List[Any] = model(a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _snake_case : Any = 1 _snake_case : str = BeitForImageClassification(a_ ) model.to(a_ ) model.eval() _snake_case : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _snake_case : Optional[Any] = model(a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase_ ( self: List[Any], a_: Optional[int], a_: List[Any], a_: str, a_: int ): '''simple docstring''' _snake_case : List[str] = self.num_labels _snake_case : List[Any] = BeitForSemanticSegmentation(a_ ) model.to(a_ ) model.eval() _snake_case : List[str] = model(a_ ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) _snake_case : str = model(a_, labels=a_ ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Tuple = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case , _snake_case : Any = config_and_inputs _snake_case : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) lowercase__ = ( { "feature-extraction": BeitModel, "image-classification": BeitForImageClassification, "image-segmentation": BeitForSemanticSegmentation, } if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Dict = BeitModelTester(self ) _snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 ) def UpperCamelCase_ ( self: str ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""BEiT does not use inputs_embeds""" ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def UpperCamelCase_ ( self: str ): '''simple docstring''' pass def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case , _snake_case : Any = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : List[str] = model_class(a_ ) self.assertIsInstance(model.get_input_embeddings(), (nn.Module) ) _snake_case : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a_, nn.Linear ) ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Any = model_class(a_ ) _snake_case : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : List[Any] = [*signature.parameters.keys()] _snake_case : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1], a_ ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' if not self.model_tester.is_training: return _snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Any = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(a_ ), BeitForMaskedImageModeling]: continue _snake_case : List[Any] = model_class(a_ ) model.to(a_ ) model.train() _snake_case : Dict = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : List[Any] = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return _snake_case : Dict = False _snake_case : Optional[Any] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(a_ ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue _snake_case : Any = model_class(a_ ) model.gradient_checkpointing_enable() model.to(a_ ) model.train() _snake_case : Any = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : int = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : int = _config_zero_init(a_ ) for model_class in self.all_model_classes: _snake_case : Tuple = model_class(config=a_ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems 
not properly initialized", ) @slow def UpperCamelCase_ ( self: int ): '''simple docstring''' for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : Optional[int] = BeitModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowercase( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self: Dict ): '''simple docstring''' return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None @slow def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : str = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(a_ ) _snake_case : Dict = self.default_image_processor _snake_case : Dict = prepare_img() _snake_case : List[str] = image_processor(images=a_, return_tensors="""pt""" ).pixel_values.to(a_ ) # prepare bool_masked_pos _snake_case : Optional[int] = torch.ones((1, 196), dtype=torch.bool ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : int = model(pixel_values=a_, bool_masked_pos=a_ ) _snake_case : Dict = outputs.logits # verify the logits _snake_case : Optional[int] = torch.Size((1, 196, 8_192) ) self.assertEqual(logits.shape, a_ ) _snake_case : Optional[Any] = torch.tensor( [[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(a_ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], a_, atol=1E-2 ) ) @slow def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Dict = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(a_ ) _snake_case : List[Any] = self.default_image_processor _snake_case : Any = prepare_img() _snake_case : Any = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : int = model(**a_ ) _snake_case : Optional[int] = outputs.logits # verify the logits _snake_case : Tuple = torch.Size((1, 1_000) ) self.assertEqual(logits.shape, a_ ) _snake_case : Any = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(a_ ) self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) ) _snake_case : str = 281 self.assertEqual(logits.argmax(-1 ).item(), a_ ) @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : int = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to( a_ ) _snake_case : int = self.default_image_processor _snake_case : Optional[Any] = prepare_img() _snake_case : Union[str, Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Union[str, Any] = model(**a_ ) _snake_case : Dict = outputs.logits # verify the logits _snake_case : Tuple = torch.Size((1, 21_841) ) self.assertEqual(logits.shape, a_ ) _snake_case : Optional[int] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(a_ ) self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) ) _snake_case : List[str] = 2_396 self.assertEqual(logits.argmax(-1 ).item(), a_ ) @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : List[str] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" ) _snake_case : int = model.to(a_ ) _snake_case : List[str] = BeitImageProcessor(do_resize=a_, size=640, 
do_center_crop=a_ ) _snake_case : Optional[int] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" ) _snake_case : Union[str, Any] = Image.open(ds[0]["""file"""] ) _snake_case : List[Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Optional[int] = model(**a_ ) _snake_case : Union[str, Any] = outputs.logits # verify the logits _snake_case : List[str] = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape, a_ ) _snake_case : Optional[int] = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" ) if is_pillow_less_than_a: _snake_case : Any = torch.tensor( [ [[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]], [[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]], [[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]], ], device=a_, ) else: _snake_case : Optional[Any] = torch.tensor( [ [[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]], [[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]], [[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]], ], device=a_, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], a_, atol=1E-4 ) ) @slow def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : int = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" ) _snake_case : List[Any] = model.to(a_ ) _snake_case : Tuple = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ ) _snake_case : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" ) _snake_case : str = Image.open(ds[0]["""file"""] ) _snake_case : Tuple = image_processor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Optional[int] = model(**a_ ) _snake_case : Union[str, Any] = outputs.logits.detach().cpu() _snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=a_, target_sizes=[(500, 300)] ) _snake_case : Optional[int] = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape, a_ ) _snake_case : List[str] = image_processor.post_process_semantic_segmentation(outputs=a_ ) _snake_case : List[str] = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape, a_ )
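# Note on the @slow tests above: they are skipped by default and, in the
# transformers test suite, enabled via the RUN_SLOW environment variable,
# e.g. `RUN_SLOW=1 pytest <path to this test file>`. The checkpoints are
# fetched from the Hub, so network access is required.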
"""simple docstring""" A_ = [ '''DownloadConfig''', '''DownloadManager''', '''DownloadMode''', '''StreamingDownloadManager''', ] from .download_config import DownloadConfig from .download_manager import DownloadManager, DownloadMode from .streaming_download_manager import StreamingDownloadManager
"""simple docstring""" import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class lowercase( __a ): '''simple docstring''' lowercase__ = (IPNDMScheduler,) lowercase__ = (("num_inference_steps", 50),) def UpperCamelCase_ ( self: Union[str, Any], **a_: Union[str, Any] ): '''simple docstring''' _snake_case : List[Any] = {"""num_train_timesteps""": 1_000} config.update(**a_ ) return config def UpperCamelCase_ ( self: Tuple, a_: Optional[int]=0, **a_: int ): '''simple docstring''' _snake_case : Optional[int] = dict(self.forward_default_kwargs ) _snake_case : Optional[Any] = kwargs.pop("""num_inference_steps""", a_ ) _snake_case : Optional[Any] = self.dummy_sample _snake_case : Dict = 0.1 * sample _snake_case : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: _snake_case : int = self.get_scheduler_config(**a_ ) _snake_case : Dict = scheduler_class(**a_ ) scheduler.set_timesteps(a_ ) # copy over dummy past residuals _snake_case : int = dummy_past_residuals[:] if time_step is None: _snake_case : Union[str, Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a_ ) _snake_case : Tuple = scheduler_class.from_pretrained(a_ ) new_scheduler.set_timesteps(a_ ) # copy over dummy past residuals _snake_case : Optional[Any] = dummy_past_residuals[:] _snake_case : List[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : str = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _snake_case : Optional[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : Optional[int] = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' pass def UpperCamelCase_ ( self: str, a_: Any=0, **a_: Tuple ): '''simple docstring''' _snake_case : str = dict(self.forward_default_kwargs ) _snake_case : List[Any] = kwargs.pop("""num_inference_steps""", a_ ) _snake_case : Optional[int] = self.dummy_sample _snake_case : Tuple = 0.1 * sample _snake_case : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: _snake_case : Any = self.get_scheduler_config() _snake_case : Tuple = scheduler_class(**a_ ) scheduler.set_timesteps(a_ ) # copy over dummy past residuals (must be after setting timesteps) _snake_case : Union[str, Any] = dummy_past_residuals[:] if time_step is None: _snake_case : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a_ ) _snake_case : List[str] = scheduler_class.from_pretrained(a_ ) # copy over dummy past residuals new_scheduler.set_timesteps(a_ ) # copy over dummy past residual (must be after setting timesteps) _snake_case : List[str] = dummy_past_residuals[:] _snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : Any = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : int = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, 
"Scheduler outputs are not identical" def UpperCamelCase_ ( self: List[Any], **a_: Optional[int] ): '''simple docstring''' _snake_case : Union[str, Any] = self.scheduler_classes[0] _snake_case : Any = self.get_scheduler_config(**a_ ) _snake_case : List[Any] = scheduler_class(**a_ ) _snake_case : Union[str, Any] = 10 _snake_case : Union[str, Any] = self.dummy_model() _snake_case : List[Any] = self.dummy_sample_deter scheduler.set_timesteps(a_ ) for i, t in enumerate(scheduler.timesteps ): _snake_case : Optional[Any] = model(a_, a_ ) _snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample for i, t in enumerate(scheduler.timesteps ): _snake_case : Union[str, Any] = model(a_, a_ ) _snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample return sample def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : str = dict(self.forward_default_kwargs ) _snake_case : int = kwargs.pop("""num_inference_steps""", a_ ) for scheduler_class in self.scheduler_classes: _snake_case : Union[str, Any] = self.get_scheduler_config() _snake_case : Tuple = scheduler_class(**a_ ) _snake_case : Dict = self.dummy_sample _snake_case : List[str] = 0.1 * sample if num_inference_steps is not None and hasattr(a_, """set_timesteps""" ): scheduler.set_timesteps(a_ ) elif num_inference_steps is not None and not hasattr(a_, """set_timesteps""" ): _snake_case : Dict = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _snake_case : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] _snake_case : List[str] = dummy_past_residuals[:] _snake_case : Optional[int] = scheduler.timesteps[5] _snake_case : Optional[Any] = scheduler.timesteps[6] _snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : List[str] = scheduler.step(a_, a_, a_, **a_ ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) _snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample _snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' for timesteps in [100, 1_000]: self.check_over_configs(num_train_timesteps=a_, time_step=a_ ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ): self.check_over_forward(num_inference_steps=a_, time_step=a_ ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Optional[int] = self.full_loop() _snake_case : Optional[int] = torch.mean(torch.abs(a_ ) ) assert abs(result_mean.item() - 2_540_529 ) < 10
"""simple docstring""" import logging import torch from accelerate import Accelerator from arguments import EvaluationArguments from datasets import load_dataset from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed class lowercase( __a ): '''simple docstring''' def __init__( self: int, a_: int, a_: int, a_: Any=1_024, a_: List[Any]=1_024, a_: Optional[Any]=3.6 ): '''simple docstring''' _snake_case : Union[str, Any] = tokenizer _snake_case : int = tokenizer.bos_token_id _snake_case : Any = dataset _snake_case : str = seq_length _snake_case : List[Any] = seq_length * chars_per_token * num_of_sequences def __iter__( self: Optional[int] ): '''simple docstring''' _snake_case : Optional[int] = iter(self.dataset ) _snake_case : Optional[int] = True while more_examples: _snake_case , _snake_case : Optional[Any] = [], 0 while True: if buffer_len >= self.input_characters: break try: buffer.append(next(a_ )["""content"""] ) buffer_len += len(buffer[-1] ) except StopIteration: _snake_case : Any = False break _snake_case : List[str] = tokenizer(a_, truncation=a_ )["""input_ids"""] _snake_case : Any = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id] ) for i in range(0, len(a_ ), self.seq_length ): _snake_case : Union[str, Any] = all_token_ids[i : i + self.seq_length] if len(a_ ) == self.seq_length: yield torch.tensor(a_ ) def UpperCAmelCase__ (snake_case__ : Tuple ): """simple docstring""" _snake_case : Tuple = {"""streaming""": True} _snake_case : Dict = load_dataset(args.dataset_name , split="""train""" , **snake_case__ ) _snake_case : Union[str, Any] = ConstantLengthDataset(snake_case__ , snake_case__ , seq_length=args.seq_length ) _snake_case : str = DataLoader(snake_case__ , batch_size=args.batch_size ) return eval_dataloader def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" model.eval() _snake_case : int = [] for step, batch in enumerate(snake_case__ ): with torch.no_grad(): _snake_case : int = model(snake_case__ , labels=snake_case__ ) _snake_case : List[Any] = outputs.loss.repeat(args.batch_size ) losses.append(accelerator.gather(snake_case__ ) ) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break _snake_case : List[str] = torch.mean(torch.cat(snake_case__ ) ) try: _snake_case : Tuple = torch.exp(snake_case__ ) except OverflowError: _snake_case : Dict = float("""inf""" ) return loss.item(), perplexity.item() # Setup Accelerator A_ = Accelerator() # Parse configuration A_ = HfArgumentParser(EvaluationArguments) A_ = parser.parse_args() set_seed(args.seed) # Logging A_ = logging.getLogger(__name__) logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) # Load model and tokenizer A_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt) A_ = AutoTokenizer.from_pretrained(args.model_ckpt) # Load dataset and dataloader A_ = create_dataloader(args) # Prepare everything with our `accelerator`. A_ , A_ = accelerator.prepare(model, eval_dataloader) # Evaluate and save the last checkpoint logger.info('''Evaluating and saving model after training''') A_ , A_ = evaluate(args) logger.info(F'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
"""simple docstring""" from __future__ import annotations import math def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True A_ = [num for num in range(3, 10_00_01, 2) if not is_prime(num)] def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ): raise ValueError("""n must be an integer""" ) if n <= 0: raise ValueError("""n must be >= 0""" ) _snake_case : Any = [] for num in range(len(snake_case__ ) ): _snake_case : Optional[int] = 0 while 2 * i * i <= odd_composites[num]: _snake_case : Optional[int] = odd_composites[num] - 2 * i * i if is_prime(snake_case__ ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(snake_case__ ) == n: return list_nums return [] def UpperCAmelCase__ (): """simple docstring""" return compute_nums(1 )[0] if __name__ == "__main__": print(F'''{solution() = }''')
"""simple docstring""" import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase: '''simple docstring''' def __init__( self: List[str], a_: List[Any], a_: str=13, a_: Dict=32, a_: Union[str, Any]=3, a_: Union[str, Any]=4, a_: Tuple=[10, 20, 30, 40], a_: Dict=[2, 2, 3, 2], a_: Tuple=True, a_: Optional[Any]=True, a_: Any=37, a_: Any="gelu", a_: int=10, a_: Tuple=0.02, a_: str=["stage2", "stage3", "stage4"], a_: List[str]=[2, 3, 4], a_: List[str]=None, ): '''simple docstring''' _snake_case : int = parent _snake_case : int = batch_size _snake_case : List[Any] = image_size _snake_case : List[str] = num_channels _snake_case : Tuple = num_stages _snake_case : Union[str, Any] = hidden_sizes _snake_case : List[Any] = depths _snake_case : Tuple = is_training _snake_case : List[str] = use_labels _snake_case : Tuple = intermediate_size _snake_case : List[str] = hidden_act _snake_case : Optional[Any] = num_labels _snake_case : Tuple = initializer_range _snake_case : Tuple = out_features _snake_case : Tuple = out_indices _snake_case : Dict = scope def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : Any = None if self.use_labels: _snake_case : Dict = ids_tensor([self.batch_size], self.num_labels ) _snake_case : Optional[Any] = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return ConvNextVaConfig( num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=a_, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, ) def UpperCamelCase_ ( self: int, a_: Tuple, a_: Any, a_: Dict ): '''simple docstring''' _snake_case : int = ConvNextVaModel(config=a_ ) model.to(a_ ) model.eval() _snake_case : Any = model(a_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def UpperCamelCase_ ( self: Optional[int], a_: List[str], a_: Tuple, a_: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[int] = ConvNextVaForImageClassification(a_ ) model.to(a_ ) model.eval() _snake_case : Optional[int] = model(a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self: Union[str, Any], a_: Tuple, a_: Tuple, a_: Tuple ): '''simple docstring''' _snake_case : List[str] = 
ConvNextVaBackbone(config=a_ ) model.to(a_ ) model.eval() _snake_case : int = model(a_ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ), len(config.out_features ) ) self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] ) # verify backbone works with out_features=None _snake_case : Tuple = None _snake_case : Tuple = ConvNextVaBackbone(config=a_ ) model.to(a_ ) model.eval() _snake_case : List[Any] = model(a_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ), 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ), 1 ) self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Dict = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case : Any = config_and_inputs _snake_case : str = {"""pixel_values""": pixel_values} return config, inputs_dict def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : List[Any] = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case : List[str] = config_and_inputs _snake_case : Any = {"""pixel_values""": pixel_values, """labels""": labels} return config, inputs_dict @require_torch class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) lowercase__ = ( {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification} if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Tuple = ConvNextVaModelTester(self ) _snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' return @unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" ) def UpperCamelCase_ ( self: int ): '''simple docstring''' pass @unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' pass @unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" ) def UpperCamelCase_ ( self: str ): '''simple docstring''' pass def UpperCamelCase_ ( self: int ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: _snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_with_labels() _snake_case : List[Any] = True if model_class.__name__ in [ *get_values(a_ ), *get_values(a_ ), ]: continue _snake_case : Tuple = 
model_class(a_ ) model.to(a_ ) model.train() _snake_case : Optional[Any] = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : Any = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: _snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_with_labels() _snake_case : Any = False _snake_case : List[Any] = True if ( model_class.__name__ in [*get_values(a_ ), *get_values(a_ )] or not model_class.supports_gradient_checkpointing ): continue _snake_case : Dict = model_class(a_ ) model.to(a_ ) model.gradient_checkpointing_enable() model.train() _snake_case : str = self._prepare_for_class(a_, a_, return_labels=a_ ) _snake_case : Optional[int] = model(**a_ ).loss loss.backward() def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : List[str] = model_class(a_ ) _snake_case : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : int = [*signature.parameters.keys()] _snake_case : Union[str, Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1], a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' def check_hidden_states_output(a_: str, a_: Tuple, a_: Tuple ): _snake_case : Optional[Any] = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): _snake_case : Any = model(**self._prepare_for_class(a_, a_ ) ) _snake_case : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case : Optional[int] = self.model_tester.num_stages self.assertEqual(len(a_ ), expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) _snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Optional[Any] = True check_hidden_states_output(a_, a_, a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case : List[str] = True check_hidden_states_output(a_, a_, a_ ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) @slow def UpperCamelCase_ ( self: Dict ): '''simple docstring''' for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : str = ConvNextVaModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowercase( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None @slow def 
UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(a_ ) _snake_case : Union[str, Any] = self.default_image_processor _snake_case : List[Any] = prepare_img() _snake_case : Optional[int] = preprocessor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Optional[int] = model(**a_ ) # verify the logits _snake_case : Optional[int] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : Optional[int] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3], a_, atol=1E-4 ) )
"""simple docstring""" import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class lowercase: '''simple docstring''' def __init__( self: List[Any], a_: str = "cpu", a_: str = "openai/clip-vit-large-patch14" ): '''simple docstring''' _snake_case : Optional[int] = device _snake_case : str = CLIPTokenizerFast.from_pretrained(a_ ) _snake_case : Union[str, Any] = [0.48_145_466, 0.4_578_275, 0.40_821_073] _snake_case : Optional[int] = [0.26_862_954, 0.26_130_258, 0.27_577_711] _snake_case : str = torchvision.transforms.Normalize(self.image_mean, self.image_std ) _snake_case : Optional[int] = torchvision.transforms.Resize(224 ) _snake_case : str = torchvision.transforms.CenterCrop(224 ) def UpperCamelCase_ ( self: List[str], a_: str ): '''simple docstring''' _snake_case : Optional[int] = self.resize(a_ ) _snake_case : List[Any] = self.center_crop(a_ ) _snake_case : Optional[Any] = self.normalize(a_ ) return images def __call__( self: Any, a_: Optional[int]=None, a_: str=None, **a_: str ): '''simple docstring''' _snake_case : Optional[int] = self.tokenizer(text=a_, **a_ ) _snake_case : Any = self.preprocess_img(a_ ) _snake_case : Union[str, Any] = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class lowercase( nn.Module ): '''simple docstring''' def __init__( self: List[Any], a_: List[Any]=10, a_: Optional[Any]=0.01, a_: List[str]=None, a_: str=None, a_: Any=None, a_: Tuple=None, a_: List[str]=None, a_: List[str]=None, a_: str=False, a_: List[str]=True, a_: Any="image", a_: Optional[Any]=True, a_: Dict=False, a_: List[str]=False, a_: Optional[int]=False, ): '''simple docstring''' super().__init__() _snake_case : int = None _snake_case : List[str] = device if device else get_device() if vqgan: _snake_case : Any = vqgan else: _snake_case : Optional[Any] = load_vqgan(self.device, conf_path=a_, ckpt_path=a_ ) self.vqgan.eval() if clip: _snake_case : Tuple = clip else: _snake_case : Optional[Any] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" ) self.clip.to(self.device ) _snake_case : List[str] = ProcessorGradientFlow(device=self.device ) _snake_case : Union[str, Any] = iterations _snake_case : Dict = lr _snake_case : Optional[int] = log _snake_case : List[str] = make_grid _snake_case : Union[str, Any] = return_val _snake_case : List[str] = quantize _snake_case : List[str] = self.vqgan.decoder.z_shape def UpperCamelCase_ ( self: Tuple, a_: str=None, a_: Dict=None, a_: Dict=5, a_: Dict=True ): '''simple docstring''' _snake_case : Dict = [] if output_path is None: _snake_case : Tuple = """./animation.gif""" if input_path is None: _snake_case : Any = self.save_path _snake_case : Optional[int] = sorted(glob(input_path + """/*""" ) ) if not len(a_ ): raise ValueError( """No images found in save path, aborting (did you pass save_intermediate=True to the generate""" """ function?)""" ) if len(a_ ) == 1: print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" ) _snake_case : List[Any] = total_duration / len(a_ ) _snake_case : Optional[Any] = [frame_duration] * len(a_ ) if extend_frames: _snake_case : Optional[int] = 1.5 _snake_case : int = 3 for file_name in paths: if file_name.endswith(""".png""" ): 
images.append(imageio.imread(a_ ) ) imageio.mimsave(a_, a_, duration=a_ ) print(f"gif saved to {output_path}" ) def UpperCamelCase_ ( self: str, a_: Tuple=None, a_: Optional[Any]=None ): '''simple docstring''' if not (path or img): raise ValueError("""Input either path or tensor""" ) if img is not None: raise NotImplementedError _snake_case : int = preprocess(Image.open(a_ ), target_image_size=256 ).to(self.device ) _snake_case : int = preprocess_vqgan(a_ ) _snake_case , *_snake_case : List[Any] = self.vqgan.encode(a_ ) return z def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any] ): '''simple docstring''' _snake_case : Optional[int] = self.latent.detach().requires_grad_() _snake_case : Tuple = base_latent + transform_vector if self.quantize: _snake_case , *_snake_case : Any = self.vqgan.quantize(a_ ) else: _snake_case : List[Any] = trans_latent return self.vqgan.decode(a_ ) def UpperCamelCase_ ( self: List[Any], a_: Any, a_: Union[str, Any], a_: Dict=None ): '''simple docstring''' _snake_case : Tuple = self.clip_preprocessor(text=a_, images=a_, return_tensors="""pt""", padding=a_ ) _snake_case : Any = self.clip(**a_ ) _snake_case : str = clip_outputs.logits_per_image if weights is not None: _snake_case : Any = similarity_logits * weights return similarity_logits.sum() def UpperCamelCase_ ( self: Any, a_: Any, a_: List[str], a_: Dict ): '''simple docstring''' _snake_case : List[Any] = self._get_clip_similarity(pos_prompts["""prompts"""], a_, weights=(1 / pos_prompts["""weights"""]) ) if neg_prompts: _snake_case : List[str] = self._get_clip_similarity(neg_prompts["""prompts"""], a_, weights=neg_prompts["""weights"""] ) else: _snake_case : Tuple = torch.tensor([1], device=self.device ) _snake_case : int = -torch.log(a_ ) + torch.log(a_ ) return loss def UpperCamelCase_ ( self: Optional[Any], a_: Tuple, a_: Union[str, Any], a_: List[str] ): '''simple docstring''' _snake_case : Tuple = torch.randn_like(self.latent, requires_grad=a_, device=self.device ) _snake_case : Dict = torch.optim.Adam([vector], lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() _snake_case : str = self._add_vector(a_ ) _snake_case : List[Any] = loop_post_process(a_ ) _snake_case : List[Any] = self._get_CLIP_loss(a_, a_, a_ ) print("""CLIP loss""", a_ ) if self.log: wandb.log({"""CLIP Loss""": clip_loss} ) clip_loss.backward(retain_graph=a_ ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def UpperCamelCase_ ( self: int, a_: Any, a_: Union[str, Any], a_: Optional[int] ): '''simple docstring''' wandb.init(reinit=a_, project="""face-editor""" ) wandb.config.update({"""Positive Prompts""": positive_prompts} ) wandb.config.update({"""Negative Prompts""": negative_prompts} ) wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} ) if image_path: _snake_case : Any = Image.open(a_ ) _snake_case : str = image.resize((256, 256) ) wandb.log("""Original Image""", wandb.Image(a_ ) ) def UpperCamelCase_ ( self: str, a_: List[Any] ): '''simple docstring''' if not prompts: return [] _snake_case : List[str] = [] _snake_case : Tuple = [] if isinstance(a_, a_ ): _snake_case : Union[str, Any] = [prompt.strip() for prompt in prompts.split("""|""" )] for prompt in prompts: if isinstance(a_, (tuple, list) ): _snake_case : List[Any] = prompt[0] _snake_case : Optional[Any] = float(prompt[1] ) elif ":" in prompt: _snake_case , _snake_case : List[Any] = prompt.split(""":""" ) _snake_case : str = float(a_ ) else: _snake_case : int = prompt 
_snake_case : Union[str, Any] = 1.0 processed_prompts.append(a_ ) weights.append(a_ ) return { "prompts": processed_prompts, "weights": torch.tensor(a_, device=self.device ), } def UpperCamelCase_ ( self: Dict, a_: List[Any], a_: List[Any]=None, a_: Optional[Any]=None, a_: Optional[Any]=True, a_: Dict=False, a_: Optional[Any]=True, a_: Optional[Any]=True, a_: Any=None, ): '''simple docstring''' if image_path: _snake_case : Union[str, Any] = self._get_latent(a_ ) else: _snake_case : Any = torch.randn(self.latent_dim, device=self.device ) if self.log: self._init_logging(a_, a_, a_ ) assert pos_prompts, "You must provide at least one positive prompt." _snake_case : str = self.process_prompts(a_ ) _snake_case : Dict = self.process_prompts(a_ ) if save_final and save_path is None: _snake_case : Any = os.path.join("""./outputs/""", """_""".join(pos_prompts["""prompts"""] ) ) if not os.path.exists(a_ ): os.makedirs(a_ ) else: _snake_case : List[Any] = save_path + """_""" + get_timestamp() os.makedirs(a_ ) _snake_case : Optional[Any] = save_path _snake_case : List[Any] = self.vqgan.decode(self.latent )[0] if show_intermediate: print("""Original Image""" ) show_pil(custom_to_pil(a_ ) ) _snake_case : List[Any] = loop_post_process(a_ ) for iter, transformed_img in enumerate(self._optimize_CLIP(a_, a_, a_ ) ): if show_intermediate: show_pil(a_ ) if save_intermediate: transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png" ) ) if self.log: wandb.log({"""Image""": wandb.Image(a_ )} ) if show_final: show_pil(a_ ) if save_final: transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png" ) )
"""simple docstring""" import sys from collections import defaultdict class lowercase: '''simple docstring''' def __init__( self: Optional[Any] ): '''simple docstring''' _snake_case : Any = [] def UpperCamelCase_ ( self: List[Any], a_: Any ): '''simple docstring''' return self.node_position[vertex] def UpperCamelCase_ ( self: Any, a_: List[Any], a_: str ): '''simple docstring''' _snake_case : Optional[Any] = pos def UpperCamelCase_ ( self: List[Any], a_: int, a_: Dict, a_: int, a_: Optional[Any] ): '''simple docstring''' if start > size // 2 - 1: return else: if 2 * start + 2 >= size: _snake_case : Dict = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: _snake_case : int = 2 * start + 1 else: _snake_case : Optional[Any] = 2 * start + 2 if heap[smallest_child] < heap[start]: _snake_case , _snake_case : Any = heap[smallest_child], positions[smallest_child] _snake_case , _snake_case : Tuple = ( heap[start], positions[start], ) _snake_case , _snake_case : Optional[Any] = temp, tempa _snake_case : Dict = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child], self.get_position(positions[start] ) ) self.set_position(positions[start], a_ ) self.top_to_bottom(a_, a_, a_, a_ ) def UpperCamelCase_ ( self: Union[str, Any], a_: List[Any], a_: Optional[Any], a_: Optional[int], a_: Dict ): '''simple docstring''' _snake_case : str = position[index] while index != 0: _snake_case : Tuple = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: _snake_case : Union[str, Any] = heap[parent] _snake_case : int = position[parent] self.set_position(position[parent], a_ ) else: _snake_case : List[Any] = val _snake_case : Any = temp self.set_position(a_, a_ ) break _snake_case : int = parent else: _snake_case : Tuple = val _snake_case : List[Any] = temp self.set_position(a_, 0 ) def UpperCamelCase_ ( self: Optional[Any], a_: Optional[int], a_: int ): '''simple docstring''' _snake_case : Any = len(a_ ) // 2 - 1 for i in range(a_, -1, -1 ): self.top_to_bottom(a_, a_, len(a_ ), a_ ) def UpperCamelCase_ ( self: Optional[Any], a_: List[str], a_: Union[str, Any] ): '''simple docstring''' _snake_case : List[Any] = positions[0] _snake_case : str = sys.maxsize self.top_to_bottom(a_, 0, len(a_ ), a_ ) return temp def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" _snake_case : Optional[Any] = Heap() _snake_case : Optional[Any] = [0] * len(snake_case__ ) _snake_case : Dict = [-1] * len(snake_case__ ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph _snake_case : List[str] = [] # Heap of Distance of vertices from their neighboring vertex _snake_case : Dict = [] for vertex in range(len(snake_case__ ) ): distance_tv.append(sys.maxsize ) positions.append(snake_case__ ) heap.node_position.append(snake_case__ ) _snake_case : Any = [] _snake_case : Tuple = 1 _snake_case : List[str] = sys.maxsize for neighbor, distance in adjacency_list[0]: _snake_case : List[Any] = 0 _snake_case : Dict = distance heap.heapify(snake_case__ , snake_case__ ) for _ in range(1 , len(snake_case__ ) ): _snake_case : Dict = heap.delete_minimum(snake_case__ , snake_case__ ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) _snake_case : List[Any] = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(snake_case__ )] ): _snake_case : Any = distance heap.bottom_to_top( 
snake_case__ , heap.get_position(snake_case__ ) , snake_case__ , snake_case__ ) _snake_case : Tuple = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > A_ = int(input('''Enter number of edges: ''').strip()) A_ = defaultdict(list) for _ in range(edges_number): A_ = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
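# Worked example (sketch, bypassing the interactive input above): for the
# weighted triangle with edges (0, 1, w=1), (1, 2, w=2) and (0, 2, w=3),
#
#   adjacency_list = {
#       0: [[1, 1], [2, 3]],
#       1: [[0, 1], [2, 2]],
#       2: [[0, 3], [1, 2]],
#   }
#
# prisms_algorithm(adjacency_list) returns [(0, 1), (1, 2)]: the MST keeps the
# two cheapest edges (total weight 3) and drops the weight-3 edge (0, 2).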
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ) or number < 0: raise ValueError("""Input must be a non-negative integer""" ) _snake_case : Dict = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import numpy as np A_ = [ ['''a''', '''b''', '''c''', '''d''', '''e'''], ['''f''', '''g''', '''h''', '''i''', '''k'''], ['''l''', '''m''', '''n''', '''o''', '''p'''], ['''q''', '''r''', '''s''', '''t''', '''u'''], ['''v''', '''w''', '''x''', '''y''', '''z'''], ] class lowercase: '''simple docstring''' def __init__( self: Dict ): '''simple docstring''' _snake_case : List[Any] = np.array(a_ ) def UpperCamelCase_ ( self: Union[str, Any], a_: str ): '''simple docstring''' _snake_case , _snake_case : Union[str, Any] = np.where(letter == self.SQUARE ) _snake_case : Dict = np.concatenate([indexa + 1, indexa + 1] ) return indexes def UpperCamelCase_ ( self: Union[str, Any], a_: int, a_: int ): '''simple docstring''' _snake_case : List[str] = self.SQUARE[indexa - 1, indexa - 1] return letter def UpperCamelCase_ ( self: Any, a_: str ): '''simple docstring''' _snake_case : int = message.lower() _snake_case : str = message.replace(""" """, """""" ) _snake_case : Optional[int] = message.replace("""j""", """i""" ) _snake_case : List[str] = np.empty((2, len(a_ )) ) for letter_index in range(len(a_ ) ): _snake_case : List[Any] = self.letter_to_numbers(message[letter_index] ) _snake_case : Union[str, Any] = numbers[0] _snake_case : Optional[int] = numbers[1] _snake_case : Tuple = first_step.reshape(2 * len(a_ ) ) _snake_case : Optional[Any] = """""" for numbers_index in range(len(a_ ) ): _snake_case : int = int(second_step[numbers_index * 2] ) _snake_case : Tuple = int(second_step[(numbers_index * 2) + 1] ) _snake_case : Optional[int] = self.numbers_to_letter(a_, a_ ) _snake_case : int = encoded_message + letter return encoded_message def UpperCamelCase_ ( self: Any, a_: str ): '''simple docstring''' _snake_case : List[Any] = message.lower() message.replace(""" """, """""" ) _snake_case : Any = np.empty(2 * len(a_ ) ) for letter_index in range(len(a_ ) ): _snake_case : List[str] = self.letter_to_numbers(message[letter_index] ) _snake_case : str = numbers[0] _snake_case : Dict = numbers[1] _snake_case : int = first_step.reshape((2, len(a_ )) ) _snake_case : Optional[Any] = """""" for numbers_index in range(len(a_ ) ): _snake_case : Union[str, Any] = int(second_step[0, numbers_index] ) _snake_case : Tuple = int(second_step[1, numbers_index] ) _snake_case : List[str] = self.numbers_to_letter(a_, a_ ) _snake_case : List[str] = decoded_message + letter return decoded_message
UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(a_ ) _snake_case : Union[str, Any] = self.default_image_processor _snake_case : List[Any] = prepare_img() _snake_case : Optional[int] = preprocessor(images=a_, return_tensors="""pt""" ).to(a_ ) # forward pass with torch.no_grad(): _snake_case : Optional[int] = model(**a_ ) # verify the logits _snake_case : Optional[int] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape, a_ ) _snake_case : Optional[int] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3], a_, atol=1E-4 ) )
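A minimal invocation sketch (an assumption, not part of the test file above; the module path is hypothetical and follows the usual transformers test layout): the @slow-marked tests, including the integration test, only execute when the RUN_SLOW environment variable is set before transformers.testing_utils is imported.

import os
import pytest

os.environ["RUN_SLOW"] = "1"  # opt in to the slow from_pretrained/integration tests
# hypothetical path; point it at wherever this test module actually lives
pytest.main(["tests/models/convnextv2/test_modeling_convnextv2.py", "-k", "Integration", "-v"])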