Dataset columns:

| Column | Type | Min | Max |
| --- | --- | --- | --- |
| code | string (lengths) | 81 | 54k |
| code_codestyle | int64 | 0 | 721 |
| style_context | string (lengths) | 91 | 41.9k |
| style_context_codestyle | int64 | 0 | 699 |
| label | int64 | 0 | 1 |
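The rows below are shown field by field in schema order. As a minimal sketch of reading rows with this schema via the `datasets` library (the repository id is a placeholder, since this dump does not name the dataset):

```python
from datasets import load_dataset

# "user/code-style-pairs" is a hypothetical repository id, not the dataset's real name.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(len(row["code"]), row["code_codestyle"])                     # string length (81 to 54k), style id (0 to 721)
print(len(row["style_context"]), row["style_context_codestyle"])  # string length (91 to 41.9k), style id (0 to 699)
print(row["label"])                                                # 0 or 1
```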
code:

```python
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def A__(__A):
    '''simple docstring'''
    _lowerCamelCase : Tuple = {}
    _lowerCamelCase : List[Any] = tokenizer(example["""content"""], truncation=__A)["""input_ids"""]
    _lowerCamelCase : Tuple = len(example["""content"""]) / len(output["""input_ids"""])
    return output


lowerCAmelCase : int = HfArgumentParser(PretokenizationArguments)
lowerCAmelCase : int = parser.parse_args()
if args.num_workers is None:
    lowerCAmelCase : Any = multiprocessing.cpu_count()
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(args.tokenizer_dir)

lowerCAmelCase : str = time.time()
lowerCAmelCase : Union[str, Any] = load_dataset(args.dataset_name, split="train")
print(F"""Dataset loaded in {time.time()-t_start:.2f}s""")

lowerCAmelCase : Dict = time.time()
lowerCAmelCase : Dict = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")

lowerCAmelCase : Tuple = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
```

code_codestyle: 15
style_context:

```python
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class __snake_case(__lowerCAmelCase):
    '''simple docstring'''

    def __init__(self : Dict, _UpperCamelCase : NestedDataStructureLike[PathLike], _UpperCamelCase : Optional[NamedSplit] = None, _UpperCamelCase : Optional[Features] = None, _UpperCamelCase : str = None, _UpperCamelCase : bool = False, _UpperCamelCase : bool = False, _UpperCamelCase : Optional[int] = None, **_UpperCamelCase : Tuple) -> Union[str, Any]:
        """simple docstring"""
        super().__init__(
            _UpperCamelCase,
            split=_UpperCamelCase,
            features=_UpperCamelCase,
            cache_dir=_UpperCamelCase,
            keep_in_memory=_UpperCamelCase,
            streaming=_UpperCamelCase,
            num_proc=_UpperCamelCase,
            **_UpperCamelCase,
        )
        _lowerCamelCase : List[Any] = path_or_paths if isinstance(_UpperCamelCase, _UpperCamelCase) else {self.split: path_or_paths}
        _lowerCamelCase : Any = Text(
            cache_dir=_UpperCamelCase,
            data_files=_UpperCamelCase,
            features=_UpperCamelCase,
            **_UpperCamelCase,
        )

    def _SCREAMING_SNAKE_CASE(self : Optional[Any]) -> Optional[Any]:
        """simple docstring"""
        if self.streaming:
            _lowerCamelCase : Tuple = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            _lowerCamelCase : List[Any] = None
            _lowerCamelCase : Any = None
            _lowerCamelCase : List[str] = None
            _lowerCamelCase : Dict = None
            self.builder.download_and_prepare(
                download_config=_UpperCamelCase,
                download_mode=_UpperCamelCase,
                verification_mode=_UpperCamelCase,
                base_path=_UpperCamelCase,
                num_proc=self.num_proc,
            )
            _lowerCamelCase : Optional[int] = self.builder.as_dataset(
                split=self.split, verification_mode=_UpperCamelCase, in_memory=self.keep_in_memory)
        return dataset
```

style_context_codestyle: 15

label: 1
code:

```python
import inspect
import unittest

from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
    from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class __snake_case:
    '''simple docstring'''

    def __init__(self : Union[str, Any], _UpperCamelCase : str, _UpperCamelCase : List[Any]=13, _UpperCamelCase : Union[str, Any]=32, _UpperCamelCase : List[Any]=3, _UpperCamelCase : str=4, _UpperCamelCase : Optional[int]=[10, 20, 30, 40], _UpperCamelCase : int=[2, 2, 3, 2], _UpperCamelCase : List[str]=True, _UpperCamelCase : Optional[Any]=True, _UpperCamelCase : List[Any]=37, _UpperCamelCase : Optional[Any]="gelu", _UpperCamelCase : int=10, _UpperCamelCase : Any=0.0_2, _UpperCamelCase : Optional[Any]=["stage2", "stage3", "stage4"], _UpperCamelCase : str=[2, 3, 4], _UpperCamelCase : Optional[Any]=None) -> Dict:
        """simple docstring"""
        _lowerCamelCase : Dict = parent
        _lowerCamelCase : List[Any] = batch_size
        _lowerCamelCase : str = image_size
        _lowerCamelCase : Optional[Any] = num_channels
        _lowerCamelCase : Optional[Any] = num_stages
        _lowerCamelCase : str = hidden_sizes
        _lowerCamelCase : Optional[Any] = depths
        _lowerCamelCase : Tuple = is_training
        _lowerCamelCase : str = use_labels
        _lowerCamelCase : str = intermediate_size
        _lowerCamelCase : List[Any] = hidden_act
        _lowerCamelCase : Dict = num_labels
        _lowerCamelCase : Optional[int] = initializer_range
        _lowerCamelCase : Union[str, Any] = out_features
        _lowerCamelCase : Any = out_indices
        _lowerCamelCase : Dict = scope

    def _SCREAMING_SNAKE_CASE(self : Optional[int]) -> int:
        """simple docstring"""
        _lowerCamelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        _lowerCamelCase : Dict = None
        if self.use_labels:
            _lowerCamelCase : Tuple = ids_tensor([self.batch_size], self.num_labels)
        _lowerCamelCase : Dict = self.get_config()
        return config, pixel_values, labels

    def _SCREAMING_SNAKE_CASE(self : List[str]) -> List[str]:
        """simple docstring"""
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=_UpperCamelCase,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def _SCREAMING_SNAKE_CASE(self : Optional[Any], _UpperCamelCase : Optional[Any], _UpperCamelCase : Any, _UpperCamelCase : int) -> Tuple:
        """simple docstring"""
        _lowerCamelCase : Union[str, Any] = ConvNextVaModel(config=_UpperCamelCase)
        model.to(_UpperCamelCase)
        model.eval()
        _lowerCamelCase : List[str] = model(_UpperCamelCase)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def _SCREAMING_SNAKE_CASE(self : Optional[Any], _UpperCamelCase : Dict, _UpperCamelCase : str, _UpperCamelCase : Optional[int]) -> List[Any]:
        """simple docstring"""
        _lowerCamelCase : Dict = ConvNextVaForImageClassification(_UpperCamelCase)
        model.to(_UpperCamelCase)
        model.eval()
        _lowerCamelCase : Dict = model(_UpperCamelCase, labels=_UpperCamelCase)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def _SCREAMING_SNAKE_CASE(self : str, _UpperCamelCase : Any, _UpperCamelCase : str, _UpperCamelCase : str) -> List[Any]:
        """simple docstring"""
        _lowerCamelCase : Optional[Any] = ConvNextVaBackbone(config=_UpperCamelCase)
        model.to(_UpperCamelCase)
        model.eval()
        _lowerCamelCase : Union[str, Any] = model(_UpperCamelCase)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        _lowerCamelCase : Optional[Any] = None
        _lowerCamelCase : List[str] = ConvNextVaBackbone(config=_UpperCamelCase)
        model.to(_UpperCamelCase)
        model.eval()
        _lowerCamelCase : Optional[Any] = model(_UpperCamelCase)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def _SCREAMING_SNAKE_CASE(self : Any) -> Optional[int]:
        """simple docstring"""
        _lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs()
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = config_and_inputs
        _lowerCamelCase : Optional[int] = {"""pixel_values""": pixel_values}
        return config, inputs_dict

    def _SCREAMING_SNAKE_CASE(self : Dict) -> Tuple:
        """simple docstring"""
        _lowerCamelCase : List[str] = self.prepare_config_and_inputs()
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Tuple = config_and_inputs
        _lowerCamelCase : Optional[int] = {"""pixel_values""": pixel_values, """labels""": labels}
        return config, inputs_dict


@require_torch
class __snake_case(__lowerCAmelCase, __lowerCAmelCase, unittest.TestCase):
    '''simple docstring'''

    _snake_case = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    _snake_case = (
        {'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    _snake_case = False
    _snake_case = False
    _snake_case = False
    _snake_case = False
    _snake_case = False

    def _SCREAMING_SNAKE_CASE(self : List[Any]) -> Union[str, Any]:
        """simple docstring"""
        _lowerCamelCase : str = ConvNextVaModelTester(self)
        _lowerCamelCase : Optional[Any] = ConfigTester(self, config_class=_UpperCamelCase, has_text_modality=_UpperCamelCase, hidden_size=37)

    def _SCREAMING_SNAKE_CASE(self : List[str]) -> str:
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def _SCREAMING_SNAKE_CASE(self : Tuple) -> Any:
        """simple docstring"""
        return

    @unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""")
    def _SCREAMING_SNAKE_CASE(self : Optional[int]) -> Union[str, Any]:
        """simple docstring"""
        pass

    @unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""")
    def _SCREAMING_SNAKE_CASE(self : Any) -> Dict:
        """simple docstring"""
        pass

    @unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""")
    def _SCREAMING_SNAKE_CASE(self : str) -> Union[str, Any]:
        """simple docstring"""
        pass

    def _SCREAMING_SNAKE_CASE(self : Tuple) -> Dict:
        """simple docstring"""
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            _lowerCamelCase, _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_with_labels()
            _lowerCamelCase : int = True

            if model_class.__name__ in [
                *get_values(_UpperCamelCase),
                *get_values(_UpperCamelCase),
            ]:
                continue

            _lowerCamelCase : Dict = model_class(_UpperCamelCase)
            model.to(_UpperCamelCase)
            model.train()
            _lowerCamelCase : int = self._prepare_for_class(_UpperCamelCase, _UpperCamelCase, return_labels=_UpperCamelCase)
            _lowerCamelCase : List[str] = model(**_UpperCamelCase).loss
            loss.backward()

    def _SCREAMING_SNAKE_CASE(self : List[Any]) -> Optional[int]:
        """simple docstring"""
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            _lowerCamelCase, _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_with_labels()
            _lowerCamelCase : Union[str, Any] = False
            _lowerCamelCase : int = True

            if (
                model_class.__name__ in [*get_values(_UpperCamelCase), *get_values(_UpperCamelCase)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            _lowerCamelCase : List[Any] = model_class(_UpperCamelCase)
            model.to(_UpperCamelCase)
            model.gradient_checkpointing_enable()
            model.train()
            _lowerCamelCase : str = self._prepare_for_class(_UpperCamelCase, _UpperCamelCase, return_labels=_UpperCamelCase)
            _lowerCamelCase : Optional[Any] = model(**_UpperCamelCase).loss
            loss.backward()

    def _SCREAMING_SNAKE_CASE(self : List[str]) -> Union[str, Any]:
        """simple docstring"""
        _lowerCamelCase, _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            _lowerCamelCase : str = model_class(_UpperCamelCase)
            _lowerCamelCase : Any = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowerCamelCase : Union[str, Any] = [*signature.parameters.keys()]

            _lowerCamelCase : List[Any] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], _UpperCamelCase)

    def _SCREAMING_SNAKE_CASE(self : Any) -> str:
        """simple docstring"""
        _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCamelCase)

    def _SCREAMING_SNAKE_CASE(self : Optional[int]) -> Dict:
        """simple docstring"""

        def check_hidden_states_output(_UpperCamelCase : Union[str, Any], _UpperCamelCase : Optional[int], _UpperCamelCase : Dict):
            _lowerCamelCase : Optional[Any] = model_class(_UpperCamelCase)
            model.to(_UpperCamelCase)
            model.eval()

            with torch.no_grad():
                _lowerCamelCase : Optional[int] = model(**self._prepare_for_class(_UpperCamelCase, _UpperCamelCase))

            _lowerCamelCase : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            _lowerCamelCase : List[Any] = self.model_tester.num_stages
            self.assertEqual(len(_UpperCamelCase), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        _lowerCamelCase, _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            _lowerCamelCase : Dict = True
            check_hidden_states_output(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _lowerCamelCase : Optional[int] = True

            check_hidden_states_output(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase)

    def _SCREAMING_SNAKE_CASE(self : Tuple) -> Tuple:
        """simple docstring"""
        _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase)

    @slow
    def _SCREAMING_SNAKE_CASE(self : str) -> Union[str, Any]:
        """simple docstring"""
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : Tuple = ConvNextVaModel.from_pretrained(_UpperCamelCase)
            self.assertIsNotNone(_UpperCamelCase)


def A__():
    '''simple docstring'''
    _lowerCamelCase : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image


@require_torch
@require_vision
class __snake_case(unittest.TestCase):
    '''simple docstring'''

    @cached_property
    def _SCREAMING_SNAKE_CASE(self : Dict) -> Tuple:
        """simple docstring"""
        return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""") if is_vision_available() else None

    @slow
    def _SCREAMING_SNAKE_CASE(self : Any) -> int:
        """simple docstring"""
        _lowerCamelCase : Any = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""").to(_UpperCamelCase)

        _lowerCamelCase : int = self.default_image_processor
        _lowerCamelCase : List[Any] = prepare_img()
        _lowerCamelCase : Optional[int] = preprocessor(images=_UpperCamelCase, return_tensors="""pt""").to(_UpperCamelCase)

        # forward pass
        with torch.no_grad():
            _lowerCamelCase : Optional[Any] = model(**_UpperCamelCase)

        # verify the logits
        _lowerCamelCase : str = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, _UpperCamelCase)

        _lowerCamelCase : str = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6]).to(_UpperCamelCase)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], _UpperCamelCase, atol=1E-4))
```

code_codestyle: 15
style_context:

```python
lowerCAmelCase : Tuple = 0  # The first color of the flag.
lowerCAmelCase : Union[str, Any] = 1  # The second color of the flag.
lowerCAmelCase : Any = 2  # The third color of the flag.
lowerCAmelCase : List[str] = (red, white, blue)


def A__(__A):
    '''simple docstring'''
    if not sequence:
        return []
    if len(__A) == 1:
        return list(__A)
    _lowerCamelCase : int = 0
    _lowerCamelCase : Dict = len(__A) - 1
    _lowerCamelCase : str = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            _lowerCamelCase, _lowerCamelCase : Tuple = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            _lowerCamelCase, _lowerCamelCase : str = sequence[high], sequence[mid]
            high -= 1
        else:
            _lowerCamelCase : int = F"""The elements inside the sequence must contains only {colors} values"""
            raise ValueError(__A)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    lowerCAmelCase : List[str] = input("Enter numbers separated by commas:\n").strip()
    lowerCAmelCase : Dict = [int(item.strip()) for item in user_input.split(",")]
    print(F"""{dutch_national_flag_sort(unsorted)}""")
```

style_context_codestyle: 15

label: 1
code:

```python
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)

lowerCAmelCase : Any = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}


class __snake_case(__lowerCAmelCase):
    '''simple docstring'''

    _snake_case = 'poolformer'

    def __init__(self : int, _UpperCamelCase : int=3, _UpperCamelCase : int=16, _UpperCamelCase : List[Any]=16, _UpperCamelCase : Optional[int]=3, _UpperCamelCase : Union[str, Any]=4.0, _UpperCamelCase : Optional[Any]=[2, 2, 6, 2], _UpperCamelCase : int=[64, 128, 320, 512], _UpperCamelCase : Any=[7, 3, 3, 3], _UpperCamelCase : List[str]=[4, 2, 2, 2], _UpperCamelCase : List[Any]=[2, 1, 1, 1], _UpperCamelCase : List[str]=4, _UpperCamelCase : Dict=0.0, _UpperCamelCase : Optional[int]="gelu", _UpperCamelCase : List[Any]=True, _UpperCamelCase : str=1E-5, _UpperCamelCase : List[str]=0.0_2, **_UpperCamelCase : Optional[int]) -> int:
        """simple docstring"""
        _lowerCamelCase : Optional[Any] = num_channels
        _lowerCamelCase : Optional[int] = patch_size
        _lowerCamelCase : Union[str, Any] = stride
        _lowerCamelCase : Any = padding
        _lowerCamelCase : Optional[Any] = pool_size
        _lowerCamelCase : Any = hidden_sizes
        _lowerCamelCase : Optional[Any] = mlp_ratio
        _lowerCamelCase : Tuple = depths
        _lowerCamelCase : Any = patch_sizes
        _lowerCamelCase : Tuple = strides
        _lowerCamelCase : str = num_encoder_blocks
        _lowerCamelCase : Optional[Any] = drop_path_rate
        _lowerCamelCase : str = hidden_act
        _lowerCamelCase : Any = use_layer_scale
        _lowerCamelCase : List[str] = layer_scale_init_value
        _lowerCamelCase : Dict = initializer_range
        super().__init__(**_UpperCamelCase)


class __snake_case(__lowerCAmelCase):
    '''simple docstring'''

    _snake_case = version.parse('1.11')

    @property
    def _SCREAMING_SNAKE_CASE(self : Union[str, Any]) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ]
        )

    @property
    def _SCREAMING_SNAKE_CASE(self : Tuple) -> float:
        """simple docstring"""
        return 2E-3
```

code_codestyle: 15
style_context:

```python
from __future__ import annotations

lowerCAmelCase : int = []


def A__(__A, __A, __A):
    '''simple docstring'''
    for i in range(len(__A)):
        if board[row][i] == 1:
            return False
    for i in range(len(__A)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(__A, -1, -1), range(__A, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(__A, -1, -1), range(__A, len(__A))):
        if board[i][j] == 1:
            return False
    return True


def A__(__A, __A):
    '''simple docstring'''
    if row >= len(__A):
        solution.append(__A)
        printboard(__A)
        print()
        return True
    for i in range(len(__A)):
        if is_safe(__A, __A, __A):
            _lowerCamelCase : int = 1
            solve(__A, row + 1)
            _lowerCamelCase : List[str] = 0
    return False


def A__(__A):
    '''simple docstring'''
    for i in range(len(__A)):
        for j in range(len(__A)):
            if board[i][j] == 1:
                print("""Q""", end=""" """)
            else:
                print(""".""", end=""" """)
        print()


# n=int(input("The no. of queens"))
lowerCAmelCase : int = 8
lowerCAmelCase : Union[str, Any] = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
```

style_context_codestyle: 15

label: 1
code:

```python
from typing import Any, Dict, Optional

import torch
import torch.nn.functional as F
from torch import nn

from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings


@maybe_allow_in_graph
class __snake_case(nn.Module):
    '''simple docstring'''

    def __init__(self : str, _UpperCamelCase : int, _UpperCamelCase : int, _UpperCamelCase : int, _UpperCamelCase : int=0.0, _UpperCamelCase : Optional[int] = None, _UpperCamelCase : str = "geglu", _UpperCamelCase : Optional[int] = None, _UpperCamelCase : bool = False, _UpperCamelCase : bool = False, _UpperCamelCase : bool = False, _UpperCamelCase : bool = False, _UpperCamelCase : bool = True, _UpperCamelCase : str = "layer_norm", _UpperCamelCase : bool = False) -> Tuple:
        """simple docstring"""
        super().__init__()
        _lowerCamelCase : int = only_cross_attention

        _lowerCamelCase : Tuple = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero"""
        _lowerCamelCase : str = (num_embeds_ada_norm is not None) and norm_type == """ada_norm"""

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                F"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
                F""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""")

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            _lowerCamelCase : Any = AdaLayerNorm(_UpperCamelCase, _UpperCamelCase)
        elif self.use_ada_layer_norm_zero:
            _lowerCamelCase : int = AdaLayerNormZero(_UpperCamelCase, _UpperCamelCase)
        else:
            _lowerCamelCase : Dict = nn.LayerNorm(_UpperCamelCase, elementwise_affine=_UpperCamelCase)
        _lowerCamelCase : Tuple = Attention(
            query_dim=_UpperCamelCase,
            heads=_UpperCamelCase,
            dim_head=_UpperCamelCase,
            dropout=_UpperCamelCase,
            bias=_UpperCamelCase,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=_UpperCamelCase,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            _lowerCamelCase : Tuple = (
                AdaLayerNorm(_UpperCamelCase, _UpperCamelCase)
                if self.use_ada_layer_norm
                else nn.LayerNorm(_UpperCamelCase, elementwise_affine=_UpperCamelCase)
            )
            _lowerCamelCase : Dict = Attention(
                query_dim=_UpperCamelCase,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=_UpperCamelCase,
                dim_head=_UpperCamelCase,
                dropout=_UpperCamelCase,
                bias=_UpperCamelCase,
                upcast_attention=_UpperCamelCase,
            )  # is self-attn if encoder_hidden_states is none
        else:
            _lowerCamelCase : Dict = None
            _lowerCamelCase : Any = None

        # 3. Feed-forward
        _lowerCamelCase : Dict = nn.LayerNorm(_UpperCamelCase, elementwise_affine=_UpperCamelCase)
        _lowerCamelCase : List[str] = FeedForward(_UpperCamelCase, dropout=_UpperCamelCase, activation_fn=_UpperCamelCase, final_dropout=_UpperCamelCase)

        # let chunk size default to None
        _lowerCamelCase : Union[str, Any] = None
        _lowerCamelCase : Optional[Any] = 0

    def _SCREAMING_SNAKE_CASE(self : int, _UpperCamelCase : Optional[int], _UpperCamelCase : int) -> Union[str, Any]:
        """simple docstring"""
        _lowerCamelCase : Tuple = chunk_size
        _lowerCamelCase : List[Any] = dim

    def _SCREAMING_SNAKE_CASE(self : str, _UpperCamelCase : torch.FloatTensor, _UpperCamelCase : Optional[torch.FloatTensor] = None, _UpperCamelCase : Optional[torch.FloatTensor] = None, _UpperCamelCase : Optional[torch.FloatTensor] = None, _UpperCamelCase : Optional[torch.LongTensor] = None, _UpperCamelCase : Dict[str, Any] = None, _UpperCamelCase : Optional[torch.LongTensor] = None) -> Optional[int]:
        """simple docstring"""
        if self.use_ada_layer_norm:
            _lowerCamelCase : List[Any] = self.norma(_UpperCamelCase, _UpperCamelCase)
        elif self.use_ada_layer_norm_zero:
            _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = self.norma(
                _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, hidden_dtype=hidden_states.dtype)
        else:
            _lowerCamelCase : List[str] = self.norma(_UpperCamelCase)

        _lowerCamelCase : List[Any] = cross_attention_kwargs if cross_attention_kwargs is not None else {}

        _lowerCamelCase : List[str] = self.attna(
            _UpperCamelCase,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=_UpperCamelCase,
            **_UpperCamelCase,
        )
        if self.use_ada_layer_norm_zero:
            _lowerCamelCase : List[str] = gate_msa.unsqueeze(1) * attn_output
        _lowerCamelCase : Optional[Any] = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attna is not None:
            _lowerCamelCase : List[Any] = (
                self.norma(_UpperCamelCase, _UpperCamelCase) if self.use_ada_layer_norm else self.norma(_UpperCamelCase)
            )
            _lowerCamelCase : List[Any] = self.attna(
                _UpperCamelCase,
                encoder_hidden_states=_UpperCamelCase,
                attention_mask=_UpperCamelCase,
                **_UpperCamelCase,
            )
            _lowerCamelCase : Tuple = attn_output + hidden_states

        # 3. Feed-forward
        _lowerCamelCase : List[str] = self.norma(_UpperCamelCase)

        if self.use_ada_layer_norm_zero:
            _lowerCamelCase : Optional[int] = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    F"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""")

            _lowerCamelCase : str = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            _lowerCamelCase : List[str] = torch.cat(
                [self.ff(_UpperCamelCase) for hid_slice in norm_hidden_states.chunk(_UpperCamelCase, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            _lowerCamelCase : List[Any] = self.ff(_UpperCamelCase)

        if self.use_ada_layer_norm_zero:
            _lowerCamelCase : str = gate_mlp.unsqueeze(1) * ff_output

        _lowerCamelCase : int = ff_output + hidden_states

        return hidden_states


class __snake_case(nn.Module):
    '''simple docstring'''

    def __init__(self : str, _UpperCamelCase : int, _UpperCamelCase : Optional[int] = None, _UpperCamelCase : int = 4, _UpperCamelCase : float = 0.0, _UpperCamelCase : str = "geglu", _UpperCamelCase : bool = False) -> int:
        """simple docstring"""
        super().__init__()
        _lowerCamelCase : List[str] = int(dim * mult)
        _lowerCamelCase : Optional[int] = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            _lowerCamelCase : int = GELU(_UpperCamelCase, _UpperCamelCase)
        if activation_fn == "gelu-approximate":
            _lowerCamelCase : Any = GELU(_UpperCamelCase, _UpperCamelCase, approximate="""tanh""")
        elif activation_fn == "geglu":
            _lowerCamelCase : Union[str, Any] = GEGLU(_UpperCamelCase, _UpperCamelCase)
        elif activation_fn == "geglu-approximate":
            _lowerCamelCase : Optional[Any] = ApproximateGELU(_UpperCamelCase, _UpperCamelCase)

        _lowerCamelCase : int = nn.ModuleList([])
        # project in
        self.net.append(_UpperCamelCase)
        # project dropout
        self.net.append(nn.Dropout(_UpperCamelCase))
        # project out
        self.net.append(nn.Linear(_UpperCamelCase, _UpperCamelCase))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(_UpperCamelCase))

    def _SCREAMING_SNAKE_CASE(self : Optional[int], _UpperCamelCase : Tuple) -> Optional[Any]:
        """simple docstring"""
        for module in self.net:
            _lowerCamelCase : Optional[int] = module(_UpperCamelCase)
        return hidden_states


class __snake_case(nn.Module):
    '''simple docstring'''

    def __init__(self : Dict, _UpperCamelCase : int, _UpperCamelCase : int, _UpperCamelCase : str = "none") -> Any:
        """simple docstring"""
        super().__init__()
        _lowerCamelCase : int = nn.Linear(_UpperCamelCase, _UpperCamelCase)
        _lowerCamelCase : Optional[Any] = approximate

    def _SCREAMING_SNAKE_CASE(self : Union[str, Any], _UpperCamelCase : Optional[int]) -> List[str]:
        """simple docstring"""
        if gate.device.type != "mps":
            return F.gelu(_UpperCamelCase, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.floataa), approximate=self.approximate).to(dtype=gate.dtype)

    def _SCREAMING_SNAKE_CASE(self : List[str], _UpperCamelCase : Optional[int]) -> Any:
        """simple docstring"""
        _lowerCamelCase : Optional[Any] = self.proj(_UpperCamelCase)
        _lowerCamelCase : List[str] = self.gelu(_UpperCamelCase)
        return hidden_states


class __snake_case(nn.Module):
    '''simple docstring'''

    def __init__(self : str, _UpperCamelCase : int, _UpperCamelCase : int) -> Any:
        """simple docstring"""
        super().__init__()
        _lowerCamelCase : Tuple = nn.Linear(_UpperCamelCase, dim_out * 2)

    def _SCREAMING_SNAKE_CASE(self : Any, _UpperCamelCase : int) -> List[str]:
        """simple docstring"""
        if gate.device.type != "mps":
            return F.gelu(_UpperCamelCase)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.floataa)).to(dtype=gate.dtype)

    def _SCREAMING_SNAKE_CASE(self : str, _UpperCamelCase : str) -> int:
        """simple docstring"""
        _lowerCamelCase, _lowerCamelCase : Dict = self.proj(_UpperCamelCase).chunk(2, dim=-1)
        return hidden_states * self.gelu(_UpperCamelCase)


class __snake_case(nn.Module):
    '''simple docstring'''

    def __init__(self : Dict, _UpperCamelCase : int, _UpperCamelCase : int) -> List[str]:
        """simple docstring"""
        super().__init__()
        _lowerCamelCase : Any = nn.Linear(_UpperCamelCase, _UpperCamelCase)

    def _SCREAMING_SNAKE_CASE(self : List[Any], _UpperCamelCase : List[Any]) -> Tuple:
        """simple docstring"""
        _lowerCamelCase : Tuple = self.proj(_UpperCamelCase)
        return x * torch.sigmoid(1.7_0_2 * x)


class __snake_case(nn.Module):
    '''simple docstring'''

    def __init__(self : Optional[int], _UpperCamelCase : Union[str, Any], _UpperCamelCase : Dict) -> Any:
        """simple docstring"""
        super().__init__()
        _lowerCamelCase : Dict = nn.Embedding(_UpperCamelCase, _UpperCamelCase)
        _lowerCamelCase : List[Any] = nn.SiLU()
        _lowerCamelCase : Union[str, Any] = nn.Linear(_UpperCamelCase, embedding_dim * 2)
        _lowerCamelCase : Optional[Any] = nn.LayerNorm(_UpperCamelCase, elementwise_affine=_UpperCamelCase)

    def _SCREAMING_SNAKE_CASE(self : int, _UpperCamelCase : Any, _UpperCamelCase : List[Any]) -> Optional[int]:
        """simple docstring"""
        _lowerCamelCase : List[str] = self.linear(self.silu(self.emb(_UpperCamelCase)))
        _lowerCamelCase, _lowerCamelCase : Optional[int] = torch.chunk(_UpperCamelCase, 2)
        _lowerCamelCase : Dict = self.norm(_UpperCamelCase) * (1 + scale) + shift
        return x


class __snake_case(nn.Module):
    '''simple docstring'''

    def __init__(self : List[Any], _UpperCamelCase : Tuple, _UpperCamelCase : List[str]) -> Dict:
        """simple docstring"""
        super().__init__()
        _lowerCamelCase : str = CombinedTimestepLabelEmbeddings(_UpperCamelCase, _UpperCamelCase)
        _lowerCamelCase : List[Any] = nn.SiLU()
        _lowerCamelCase : Optional[Any] = nn.Linear(_UpperCamelCase, 6 * embedding_dim, bias=_UpperCamelCase)
        _lowerCamelCase : Union[str, Any] = nn.LayerNorm(_UpperCamelCase, elementwise_affine=_UpperCamelCase, eps=1E-6)

    def _SCREAMING_SNAKE_CASE(self : Union[str, Any], _UpperCamelCase : Optional[int], _UpperCamelCase : List[Any], _UpperCamelCase : List[str], _UpperCamelCase : Dict=None) -> int:
        """simple docstring"""
        _lowerCamelCase : Union[str, Any] = self.linear(self.silu(self.emb(_UpperCamelCase, _UpperCamelCase, hidden_dtype=_UpperCamelCase)))
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = emb.chunk(6, dim=1)
        _lowerCamelCase : List[Any] = self.norm(_UpperCamelCase) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp


class __snake_case(nn.Module):
    '''simple docstring'''

    def __init__(self : List[Any], _UpperCamelCase : int, _UpperCamelCase : int, _UpperCamelCase : int, _UpperCamelCase : Optional[str] = None, _UpperCamelCase : float = 1E-5) -> List[str]:
        """simple docstring"""
        super().__init__()
        _lowerCamelCase : int = num_groups
        _lowerCamelCase : List[Any] = eps

        if act_fn is None:
            _lowerCamelCase : str = None
        else:
            _lowerCamelCase : List[Any] = get_activation(_UpperCamelCase)

        _lowerCamelCase : Any = nn.Linear(_UpperCamelCase, out_dim * 2)

    def _SCREAMING_SNAKE_CASE(self : int, _UpperCamelCase : Tuple, _UpperCamelCase : Dict) -> Dict:
        """simple docstring"""
        if self.act:
            _lowerCamelCase : Optional[Any] = self.act(_UpperCamelCase)

        _lowerCamelCase : Any = self.linear(_UpperCamelCase)
        _lowerCamelCase : str = emb[:, :, None, None]
        _lowerCamelCase, _lowerCamelCase : List[Any] = emb.chunk(2, dim=1)

        _lowerCamelCase : str = F.group_norm(_UpperCamelCase, self.num_groups, eps=self.eps)
        _lowerCamelCase : List[Any] = x * (1 + scale) + shift
        return x
```

code_codestyle: 15
style_context:

```python
import argparse
import os

import torch

from transformers import (
    XLNetConfig,
    XLNetForQuestionAnswering,
    XLNetForSequenceClassification,
    XLNetLMHeadModel,
    load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


lowerCAmelCase : int = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}

logging.set_verbosity_info()


def A__(__A, __A, __A, __A=None):
    '''simple docstring'''
    # Initialise PyTorch model
    _lowerCamelCase : Tuple = XLNetConfig.from_json_file(__A)

    _lowerCamelCase : List[Any] = finetuning_task.lower() if finetuning_task is not None else """"""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""")
        _lowerCamelCase : int = finetuning_task
        _lowerCamelCase : Union[str, Any] = GLUE_TASKS_NUM_LABELS[finetuning_task]
        _lowerCamelCase : int = XLNetForSequenceClassification(__A)
    elif "squad" in finetuning_task:
        _lowerCamelCase : Dict = finetuning_task
        _lowerCamelCase : Optional[Any] = XLNetForQuestionAnswering(__A)
    else:
        _lowerCamelCase : Any = XLNetLMHeadModel(__A)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(__A, __A, __A)

    # Save pytorch-model
    _lowerCamelCase : Optional[Any] = os.path.join(__A, __A)
    _lowerCamelCase : Any = os.path.join(__A, __A)
    print(F"""Save PyTorch model to {os.path.abspath(__A)}""")
    torch.save(model.state_dict(), __A)
    print(F"""Save configuration file to {os.path.abspath(__A)}""")
    with open(__A, """w""", encoding="""utf-8""") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    lowerCAmelCase : Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    lowerCAmelCase : Union[str, Any] = parser.parse_args()
    print(args)

    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
```

style_context_codestyle: 15

label: 1
code:

```python
from typing import Optional, Tuple

import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule


def A__(__A, __A, __A=1E-12):
    '''simple docstring'''
    _lowerCamelCase : Tuple = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(__A, axis=1), a_min=__A)).T
    _lowerCamelCase : Any = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(__A, axis=1), a_min=__A)).T
    return jnp.matmul(__A, norm_emb_a.T)


class __snake_case(nn.Module):
    '''simple docstring'''

    _snake_case = 42
    _snake_case = jnp.floataa

    def _SCREAMING_SNAKE_CASE(self : List[Any]) -> List[Any]:
        """simple docstring"""
        _lowerCamelCase : List[str] = FlaxCLIPVisionModule(self.config.vision_config)
        _lowerCamelCase : Dict = nn.Dense(self.config.projection_dim, use_bias=_UpperCamelCase, dtype=self.dtype)

        _lowerCamelCase : str = self.param("""concept_embeds""", jax.nn.initializers.ones, (17, self.config.projection_dim))
        _lowerCamelCase : int = self.param(
            """special_care_embeds""", jax.nn.initializers.ones, (3, self.config.projection_dim))

        _lowerCamelCase : Any = self.param("""concept_embeds_weights""", jax.nn.initializers.ones, (17,))
        _lowerCamelCase : Dict = self.param("""special_care_embeds_weights""", jax.nn.initializers.ones, (3,))

    def __call__(self : Any, _UpperCamelCase : Tuple) -> Any:
        """simple docstring"""
        _lowerCamelCase : Optional[Any] = self.vision_model(_UpperCamelCase)[1]
        _lowerCamelCase : int = self.visual_projection(_UpperCamelCase)

        _lowerCamelCase : int = jax_cosine_distance(_UpperCamelCase, self.special_care_embeds)
        _lowerCamelCase : Dict = jax_cosine_distance(_UpperCamelCase, self.concept_embeds)

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        _lowerCamelCase : Optional[Any] = 0.0

        _lowerCamelCase : List[str] = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        _lowerCamelCase : List[str] = jnp.round(_UpperCamelCase, 3)
        _lowerCamelCase : Any = jnp.any(special_scores > 0, axis=1, keepdims=_UpperCamelCase)
        # Use a lower threshold if an image has any special care concept
        _lowerCamelCase : List[Any] = is_special_care * 0.0_1

        _lowerCamelCase : str = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        _lowerCamelCase : Optional[Any] = jnp.round(_UpperCamelCase, 3)
        _lowerCamelCase : Union[str, Any] = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class __snake_case(__lowerCAmelCase):
    '''simple docstring'''

    _snake_case = CLIPConfig
    _snake_case = 'clip_input'
    _snake_case = FlaxStableDiffusionSafetyCheckerModule

    def __init__(self : int, _UpperCamelCase : CLIPConfig, _UpperCamelCase : Optional[Tuple] = None, _UpperCamelCase : int = 0, _UpperCamelCase : jnp.dtype = jnp.floataa, _UpperCamelCase : bool = True, **_UpperCamelCase : List[Any]) -> Dict:
        """simple docstring"""
        if input_shape is None:
            _lowerCamelCase : int = (1, 224, 224, 3)
        _lowerCamelCase : Union[str, Any] = self.module_class(config=_UpperCamelCase, dtype=_UpperCamelCase, **_UpperCamelCase)
        super().__init__(_UpperCamelCase, _UpperCamelCase, input_shape=_UpperCamelCase, seed=_UpperCamelCase, dtype=_UpperCamelCase, _do_init=_do_init)

    def _SCREAMING_SNAKE_CASE(self : str, _UpperCamelCase : jax.random.KeyArray, _UpperCamelCase : Tuple, _UpperCamelCase : FrozenDict = None) -> FrozenDict:
        """simple docstring"""
        _lowerCamelCase : List[Any] = jax.random.normal(_UpperCamelCase, _UpperCamelCase)

        _lowerCamelCase, _lowerCamelCase : Optional[Any] = jax.random.split(_UpperCamelCase)
        _lowerCamelCase : Tuple = {"""params""": params_rng, """dropout""": dropout_rng}

        _lowerCamelCase : Tuple = self.module.init(_UpperCamelCase, _UpperCamelCase)["""params"""]

        return random_params

    def __call__(self : Union[str, Any], _UpperCamelCase : Tuple, _UpperCamelCase : dict = None) -> Union[str, Any]:
        """simple docstring"""
        _lowerCamelCase : List[Any] = jnp.transpose(_UpperCamelCase, (0, 2, 3, 1))

        return self.module.apply(
            {"""params""": params or self.params},
            jnp.array(_UpperCamelCase, dtype=jnp.floataa),
            rngs={},
        )
```

code_codestyle: 15
style_context:

```python
def A__(__A):
    '''simple docstring'''
    _lowerCamelCase : Tuple = 0
    for ch in input_str:
        _lowerCamelCase : Optional[Any] = ord(__A)
        _lowerCamelCase : List[str] = pow(2, __A)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```

style_context_codestyle: 15

label: 1
code:

```python
import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


class __snake_case(__lowerCAmelCase):
    '''simple docstring'''

    _snake_case = (UnCLIPScheduler,)

    def _SCREAMING_SNAKE_CASE(self : int, **_UpperCamelCase : List[str]) -> int:
        """simple docstring"""
        _lowerCamelCase : Any = {
            """num_train_timesteps""": 1000,
            """variance_type""": """fixed_small_log""",
            """clip_sample""": True,
            """clip_sample_range""": 1.0,
            """prediction_type""": """epsilon""",
        }
        config.update(**_UpperCamelCase)
        return config

    def _SCREAMING_SNAKE_CASE(self : List[Any]) -> int:
        """simple docstring"""
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=_UpperCamelCase)

    def _SCREAMING_SNAKE_CASE(self : str) -> Optional[Any]:
        """simple docstring"""
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=_UpperCamelCase)

    def _SCREAMING_SNAKE_CASE(self : Dict) -> Union[str, Any]:
        """simple docstring"""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=_UpperCamelCase)

    def _SCREAMING_SNAKE_CASE(self : int) -> str:
        """simple docstring"""
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=_UpperCamelCase)

    def _SCREAMING_SNAKE_CASE(self : int) -> Tuple:
        """simple docstring"""
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=_UpperCamelCase)

    def _SCREAMING_SNAKE_CASE(self : Union[str, Any]) -> Optional[int]:
        """simple docstring"""
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=_UpperCamelCase, prev_timestep=_UpperCamelCase)

    def _SCREAMING_SNAKE_CASE(self : Any) -> Any:
        """simple docstring"""
        _lowerCamelCase : Tuple = self.scheduler_classes[0]
        _lowerCamelCase : Optional[Any] = self.get_scheduler_config(variance_type="""fixed_small_log""")
        _lowerCamelCase : Any = scheduler_class(**_UpperCamelCase)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0_0_0_0E-1_0)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_5_4_9_6_2_5)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9_9_9_4_9_8_7)) < 1E-5

    def _SCREAMING_SNAKE_CASE(self : List[str]) -> List[Any]:
        """simple docstring"""
        _lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
        _lowerCamelCase : Optional[Any] = self.get_scheduler_config(variance_type="""learned_range""")
        _lowerCamelCase : Optional[Any] = scheduler_class(**_UpperCamelCase)

        _lowerCamelCase : int = 0.5

        assert scheduler._get_variance(1, predicted_variance=_UpperCamelCase) - -1_0.1_7_1_2_7_9_0 < 1E-5
        assert scheduler._get_variance(487, predicted_variance=_UpperCamelCase) - -5.7_9_9_8_0_5_2 < 1E-5
        assert scheduler._get_variance(999, predicted_variance=_UpperCamelCase) - -0.0_0_1_0_0_1_1 < 1E-5

    def _SCREAMING_SNAKE_CASE(self : Dict) -> int:
        """simple docstring"""
        _lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
        _lowerCamelCase : List[str] = self.get_scheduler_config()
        _lowerCamelCase : Union[str, Any] = scheduler_class(**_UpperCamelCase)

        _lowerCamelCase : Tuple = scheduler.timesteps

        _lowerCamelCase : int = self.dummy_model()
        _lowerCamelCase : List[str] = self.dummy_sample_deter
        _lowerCamelCase : Optional[int] = torch.manual_seed(0)

        for i, t in enumerate(_UpperCamelCase):
            # 1. predict noise residual
            _lowerCamelCase : Optional[int] = model(_UpperCamelCase, _UpperCamelCase)

            # 2. predict previous mean of sample x_t-1
            _lowerCamelCase : List[str] = scheduler.step(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, generator=_UpperCamelCase).prev_sample

            _lowerCamelCase : Union[str, Any] = pred_prev_sample

        _lowerCamelCase : int = torch.sum(torch.abs(_UpperCamelCase))
        _lowerCamelCase : int = torch.mean(torch.abs(_UpperCamelCase))

        assert abs(result_sum.item() - 2_5_2.2_6_8_2_4_9_5) < 1E-2
        assert abs(result_mean.item() - 0.3_2_8_4_7_4_3) < 1E-3

    def _SCREAMING_SNAKE_CASE(self : Dict) -> Dict:
        """simple docstring"""
        _lowerCamelCase : Optional[int] = self.scheduler_classes[0]
        _lowerCamelCase : Dict = self.get_scheduler_config()
        _lowerCamelCase : Optional[int] = scheduler_class(**_UpperCamelCase)
        scheduler.set_timesteps(25)

        _lowerCamelCase : Dict = scheduler.timesteps

        _lowerCamelCase : Any = self.dummy_model()
        _lowerCamelCase : Optional[Any] = self.dummy_sample_deter
        _lowerCamelCase : Dict = torch.manual_seed(0)

        for i, t in enumerate(_UpperCamelCase):
            # 1. predict noise residual
            _lowerCamelCase : Any = model(_UpperCamelCase, _UpperCamelCase)

            if i + 1 == timesteps.shape[0]:
                _lowerCamelCase : Optional[int] = None
            else:
                _lowerCamelCase : Any = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            _lowerCamelCase : int = scheduler.step(
                _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, prev_timestep=_UpperCamelCase, generator=_UpperCamelCase).prev_sample

            _lowerCamelCase : Union[str, Any] = pred_prev_sample

        _lowerCamelCase : Optional[int] = torch.sum(torch.abs(_UpperCamelCase))
        _lowerCamelCase : Any = torch.mean(torch.abs(_UpperCamelCase))

        assert abs(result_sum.item() - 2_5_8.2_0_4_4_9_8_3) < 1E-2
        assert abs(result_mean.item() - 0.3_3_6_2_0_3_8) < 1E-3

    def _SCREAMING_SNAKE_CASE(self : Any) -> Union[str, Any]:
        """simple docstring"""
        pass

    def _SCREAMING_SNAKE_CASE(self : List[Any]) -> str:
        """simple docstring"""
        pass
```

code_codestyle: 15
style_context:

```python
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class __snake_case(unittest.TestCase):
    '''simple docstring'''

    @slow
    def _SCREAMING_SNAKE_CASE(self : Tuple) -> int:
        """simple docstring"""
        _lowerCamelCase : Tuple = XLMRobertaModel.from_pretrained("""xlm-roberta-base""")
        _lowerCamelCase : Optional[int] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
        # The dog is cute and lives in the garden house

        _lowerCamelCase : Optional[Any] = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        _lowerCamelCase : str = torch.tensor(
            [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]

        with torch.no_grad():
            _lowerCamelCase : List[str] = model(_UpperCamelCase)["""last_hidden_state"""].detach()
        self.assertEqual(output.shape, _UpperCamelCase)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], _UpperCamelCase, atol=1E-3))

    @slow
    def _SCREAMING_SNAKE_CASE(self : List[Any]) -> Optional[Any]:
        """simple docstring"""
        _lowerCamelCase : List[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""")
        _lowerCamelCase : Optional[Any] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
        # The dog is cute and lives in the garden house

        _lowerCamelCase : str = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        _lowerCamelCase : Union[str, Any] = torch.tensor(
            [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]

        with torch.no_grad():
            _lowerCamelCase : int = model(_UpperCamelCase)["""last_hidden_state"""].detach()
        self.assertEqual(output.shape, _UpperCamelCase)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], _UpperCamelCase, atol=1E-3))
```

style_context_codestyle: 15

label: 1
code:

```python
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device


lowerCAmelCase : Union[str, Any] = False


class __snake_case(unittest.TestCase):
    '''simple docstring'''

    pass


@nightly
@require_torch_gpu
class __snake_case(unittest.TestCase):
    '''simple docstring'''

    def _SCREAMING_SNAKE_CASE(self : Optional[Any]) -> List[str]:
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _SCREAMING_SNAKE_CASE(self : Tuple) -> List[Any]:
        """simple docstring"""
        _lowerCamelCase : int = VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(_UpperCamelCase)
        pipe.set_progress_bar_config(disable=_UpperCamelCase)

        _lowerCamelCase : Optional[Any] = """A painting of a squirrel eating a burger """
        _lowerCamelCase : List[str] = torch.manual_seed(0)
        _lowerCamelCase : Tuple = pipe(
            prompt=_UpperCamelCase, generator=_UpperCamelCase, guidance_scale=7.5, num_inference_steps=2, output_type="""numpy""").images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(_UpperCamelCase)
            _lowerCamelCase : Optional[int] = VersatileDiffusionTextToImagePipeline.from_pretrained(_UpperCamelCase)
        pipe.to(_UpperCamelCase)
        pipe.set_progress_bar_config(disable=_UpperCamelCase)

        _lowerCamelCase : List[Any] = generator.manual_seed(0)
        _lowerCamelCase : str = pipe(
            prompt=_UpperCamelCase, generator=_UpperCamelCase, guidance_scale=7.5, num_inference_steps=2, output_type="""numpy""").images

        assert np.abs(image - new_image).sum() < 1E-5, "Models don't have the same forward pass"

    def _SCREAMING_SNAKE_CASE(self : str) -> str:
        """simple docstring"""
        _lowerCamelCase : Dict = VersatileDiffusionTextToImagePipeline.from_pretrained(
            """shi-labs/versatile-diffusion""", torch_dtype=torch.floataa)
        pipe.to(_UpperCamelCase)
        pipe.set_progress_bar_config(disable=_UpperCamelCase)

        _lowerCamelCase : List[str] = """A painting of a squirrel eating a burger """
        _lowerCamelCase : Union[str, Any] = torch.manual_seed(0)
        _lowerCamelCase : Union[str, Any] = pipe(
            prompt=_UpperCamelCase, generator=_UpperCamelCase, guidance_scale=7.5, num_inference_steps=50, output_type="""numpy""").images

        _lowerCamelCase : List[str] = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        _lowerCamelCase : Optional[int] = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
```

code_codestyle: 15
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase : Tuple =logging.get_logger(__name__) class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = ['pixel_values'] def __init__( self : Optional[Any] , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Union[int, float] = 1 / 255 , _UpperCamelCase : bool = True , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , **_UpperCamelCase : str , ) ->None: """simple docstring""" super().__init__(**_UpperCamelCase) _lowerCamelCase : Tuple = size if size is not None else {"""height""": 256, """width""": 256} _lowerCamelCase : Optional[Any] = get_size_dict(_UpperCamelCase) _lowerCamelCase : Any = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} _lowerCamelCase : Any = get_size_dict(_UpperCamelCase , param_name="""crop_size""") _lowerCamelCase : int = do_resize _lowerCamelCase : int = size _lowerCamelCase : Optional[int] = resample _lowerCamelCase : int = do_center_crop _lowerCamelCase : Optional[Any] = crop_size _lowerCamelCase : Union[str, Any] = do_rescale _lowerCamelCase : List[str] = rescale_factor _lowerCamelCase : List[Any] = do_normalize _lowerCamelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray: """simple docstring""" _lowerCamelCase : Dict = get_size_dict(_UpperCamelCase) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""") return resize( _UpperCamelCase , size=(size["""height"""], size["""width"""]) , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : List[str] , ) ->np.ndarray: """simple docstring""" _lowerCamelCase : Union[str, Any] = get_size_dict(_UpperCamelCase) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. 
Got {size.keys()}""") return center_crop(_UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[int, float] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->str: """simple docstring""" return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray: """simple docstring""" return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : ImageInput , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Tuple=None , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : bool = None , _UpperCamelCase : float = None , _UpperCamelCase : bool = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , _UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCamelCase : List[Any] , ) ->PIL.Image.Image: """simple docstring""" _lowerCamelCase : Any = do_resize if do_resize is not None else self.do_resize _lowerCamelCase : List[str] = resample if resample is not None else self.resample _lowerCamelCase : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop _lowerCamelCase : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale _lowerCamelCase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCamelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize _lowerCamelCase : int = image_mean if image_mean is not None else self.image_mean _lowerCamelCase : Dict = image_std if image_std is not None else self.image_std _lowerCamelCase : Optional[Any] = size if size is not None else self.size _lowerCamelCase : Optional[int] = get_size_dict(_UpperCamelCase) _lowerCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size _lowerCamelCase : Dict = get_size_dict(_UpperCamelCase , param_name="""crop_size""") _lowerCamelCase : int = make_list_of_images(_UpperCamelCase) if not valid_images(_UpperCamelCase): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""") if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""") if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""") if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""") if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""") # All transformations expect numpy arrays. 
_lowerCamelCase : Union[str, Any] = [to_numpy_array(_UpperCamelCase) for image in images] if do_resize: _lowerCamelCase : Any = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase) for image in images] if do_center_crop: _lowerCamelCase : str = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase) for image in images] if do_rescale: _lowerCamelCase : Optional[int] = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase) for image in images] if do_normalize: _lowerCamelCase : List[str] = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase) for image in images] _lowerCamelCase : List[str] = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase) for image in images] _lowerCamelCase : int = {"""pixel_values""": images} return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase)
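# Editor's note -- a minimal, hedged usage sketch for the image-processor row above; it is
# not part of the dataset row. The class and method names in this row are obfuscated
# (`__snake_case`, `_SCREAMING_SNAKE_CASE`); the sketch assumes they map back to a concrete
# processor class, here hypothetically called `ImageProcessor`, whose public entry point is
# `preprocess`:
#
#   import numpy as np
#   processor = ImageProcessor()  # defaults: resize to 256x256, then center-crop to 224x224
#   image = np.zeros((480, 640, 3), dtype=np.uint8)
#   batch = processor.preprocess(image, return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 224, 224), channels-first output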
15
1
import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings lowerCAmelCase : Optional[int] =logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = field(default=__lowerCAmelCase , metadata={'help': 'Whether to use SortishSampler or not.'} ) _snake_case = field( default=__lowerCAmelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} ) _snake_case = field( default=__lowerCAmelCase , metadata={ 'help': ( 'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default ' 'to the `max_length` value of the model configuration.' ) } , ) _snake_case = field( default=__lowerCAmelCase , metadata={ 'help': ( 'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default ' 'to the `num_beams` value of the model configuration.' ) } , ) _snake_case = field( default=__lowerCAmelCase , metadata={ 'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.' } , ) def _SCREAMING_SNAKE_CASE ( self : str) ->Tuple: """simple docstring""" _lowerCamelCase : int = super().to_dict() for k, v in d.items(): if isinstance(_UpperCamelCase , _UpperCamelCase): _lowerCamelCase : List[Any] = v.to_dict() return d
15
from __future__ import annotations from math import pi from typing import Protocol import matplotlib.pyplot as plt import numpy as np class __snake_case ( __lowerCAmelCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : float) ->float: """simple docstring""" return 0.0 def A__ ( __A , __A ): '''simple docstring''' _lowerCamelCase : int = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] ) _lowerCamelCase : Tuple = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] ) return lowest, highest def A__ ( __A , __A ): '''simple docstring''' _lowerCamelCase : Tuple = 512 _lowerCamelCase : Tuple = [1] + [0] * (size - 1) _lowerCamelCase : Optional[Any] = [filter_type.process(__A ) for item in inputs] _lowerCamelCase : Optional[Any] = [0] * (samplerate - size) # zero-padding outputs += filler _lowerCamelCase : Tuple = np.abs(np.fft.fft(__A ) ) _lowerCamelCase : List[Any] = 20 * np.logaa(__A ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel("""Frequency (Hz)""" ) plt.xscale("""log""" ) # Display within reasonable bounds _lowerCamelCase : Any = get_bounds(__A , __A ) plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) ) plt.ylabel("""Gain (dB)""" ) plt.plot(__A ) plt.show() def A__ ( __A , __A ): '''simple docstring''' _lowerCamelCase : Tuple = 512 _lowerCamelCase : Union[str, Any] = [1] + [0] * (size - 1) _lowerCamelCase : int = [filter_type.process(__A ) for item in inputs] _lowerCamelCase : Optional[Any] = [0] * (samplerate - size) # zero-padding outputs += filler _lowerCamelCase : Any = np.angle(np.fft.fft(__A ) ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel("""Frequency (Hz)""" ) plt.xscale("""log""" ) plt.ylim(-2 * pi , 2 * pi ) plt.ylabel("""Phase shift (Radians)""" ) plt.plot(np.unwrap(__A , -2 * pi ) ) plt.show()
15
1
import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def A__ ( __A , __A , __A , __A , __A ): '''simple docstring''' # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file _lowerCamelCase : Optional[int] = TapasConfig.from_json_file(__A ) # set absolute/relative position embeddings parameter _lowerCamelCase : List[str] = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": _lowerCamelCase : Optional[int] = TapasForQuestionAnswering(config=__A ) elif task == "WTQ": # run_task_main.py hparams _lowerCamelCase : int = 4 _lowerCamelCase : Union[str, Any] = True # hparam_utils.py hparams _lowerCamelCase : Any = 0.664_694 _lowerCamelCase : int = 0.207_951 _lowerCamelCase : Union[str, Any] = 0.121_194 _lowerCamelCase : int = True _lowerCamelCase : List[Any] = True _lowerCamelCase : List[str] = False _lowerCamelCase : Tuple = 0.0_352_513 _lowerCamelCase : Tuple = TapasForQuestionAnswering(config=__A ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams _lowerCamelCase : Dict = 4 _lowerCamelCase : List[str] = False # hparam_utils.py hparams _lowerCamelCase : str = 36.4_519 _lowerCamelCase : Any = 0.903_421 _lowerCamelCase : int = 222.088 _lowerCamelCase : Optional[Any] = True _lowerCamelCase : Optional[int] = True _lowerCamelCase : Optional[int] = True _lowerCamelCase : Any = 0.763_141 _lowerCamelCase : List[str] = TapasForQuestionAnswering(config=__A ) elif task == "TABFACT": _lowerCamelCase : List[str] = TapasForSequenceClassification(config=__A ) elif task == "MLM": _lowerCamelCase : int = TapasForMaskedLM(config=__A ) elif task == "INTERMEDIATE_PRETRAINING": _lowerCamelCase : Union[str, Any] = TapasModel(config=__A ) else: raise ValueError(F"""Task {task} not supported.""" ) print(F"""Building PyTorch model from configuration: {config}""" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(__A , __A , __A ) # Save pytorch-model (weights and configuration) print(F"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(__A ) # Save tokenizer files print(F"""Save tokenizer files to {pytorch_dump_path}""" ) _lowerCamelCase : List[Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 ) tokenizer.save_pretrained(__A ) print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": lowerCAmelCase : Dict =argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA." ) parser.add_argument( "--reset_position_index_per_cell", default=False, action="store_true", help="Whether to use relative position embeddings or not. Defaults to True.", ) parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--tapas_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained TAPAS model. \n" "This specifies the model architecture." 
), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) lowerCAmelCase : List[str] =parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
15
import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL lowerCAmelCase : Tuple =version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11") def A__ ( __A , __A , __A , __A , __A , __A , __A , __A=False , ): '''simple docstring''' output_path.parent.mkdir(parents=__A , exist_ok=__A ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( __A , __A , f=output_path.as_posix() , input_names=__A , output_names=__A , dynamic_axes=__A , do_constant_folding=__A , use_external_data_format=__A , enable_onnx_checker=__A , opset_version=__A , ) else: export( __A , __A , f=output_path.as_posix() , input_names=__A , output_names=__A , dynamic_axes=__A , do_constant_folding=__A , opset_version=__A , ) @torch.no_grad() def A__ ( __A , __A , __A , __A = False ): '''simple docstring''' _lowerCamelCase : Tuple = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): _lowerCamelCase : str = """cuda""" elif fpaa and not torch.cuda.is_available(): raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" ) else: _lowerCamelCase : List[str] = """cpu""" _lowerCamelCase : Dict = Path(__A ) # VAE DECODER _lowerCamelCase : Optional[Any] = AutoencoderKL.from_pretrained(model_path + """/vae""" ) _lowerCamelCase : List[str] = vae_decoder.config.latent_channels # forward only through the decoder part _lowerCamelCase : Tuple = vae_decoder.decode onnx_export( __A , model_args=( torch.randn(1 , __A , 25 , 25 ).to(device=__A , dtype=__A ), False, ) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={ """latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""}, } , opset=__A , ) del vae_decoder if __name__ == "__main__": lowerCAmelCase : Optional[int] =argparse.ArgumentParser() parser.add_argument( "--model_path", type=str, required=True, help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", ) parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") parser.add_argument( "--opset", default=14, type=int, help="The version of the ONNX operator set to use.", ) parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") lowerCAmelCase : Optional[Any] =parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print("SD: Done: ONNX")
15
1
import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = ['image_processor', 'tokenizer'] _snake_case = 'BlipImageProcessor' _snake_case = 'AutoTokenizer' def __init__( self : Optional[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple) ->str: """simple docstring""" super().__init__(_UpperCamelCase , _UpperCamelCase) # add QFormer tokenizer _lowerCamelCase : Dict = qformer_tokenizer def __call__( self : Union[str, Any] , _UpperCamelCase : ImageInput = None , _UpperCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _UpperCamelCase : bool = True , _UpperCamelCase : Union[bool, str, PaddingStrategy] = False , _UpperCamelCase : Union[bool, str, TruncationStrategy] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : int = 0 , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[bool] = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[Union[str, TensorType]] = None , **_UpperCamelCase : Dict , ) ->BatchFeature: """simple docstring""" if images is None and text is None: raise ValueError("""You have to specify at least images or text.""") _lowerCamelCase : Tuple = BatchFeature() if text is not None: _lowerCamelCase : Union[str, Any] = self.tokenizer( text=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , ) encoding.update(_UpperCamelCase) _lowerCamelCase : List[str] = self.qformer_tokenizer( text=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , ) _lowerCamelCase : str = qformer_text_encoding.pop("""input_ids""") _lowerCamelCase : Any = qformer_text_encoding.pop("""attention_mask""") if images is not None: _lowerCamelCase : str = self.image_processor(_UpperCamelCase , return_tensors=_UpperCamelCase) encoding.update(_UpperCamelCase) return encoding def _SCREAMING_SNAKE_CASE ( self : int , *_UpperCamelCase : List[Any] , **_UpperCamelCase : Optional[Any]) ->List[str]: """simple docstring""" return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : 
Tuple , *_UpperCamelCase : List[str] , **_UpperCamelCase : Any) ->List[Any]: """simple docstring""" return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[int]: """simple docstring""" _lowerCamelCase : int = self.tokenizer.model_input_names _lowerCamelCase : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : str , **_UpperCamelCase : Union[str, Any]) ->str: """simple docstring""" if os.path.isfile(_UpperCamelCase): raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""") os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase) _lowerCamelCase : str = os.path.join(_UpperCamelCase , """qformer_tokenizer""") self.qformer_tokenizer.save_pretrained(_UpperCamelCase) return super().save_pretrained(_UpperCamelCase , **_UpperCamelCase) @classmethod def _SCREAMING_SNAKE_CASE ( cls : int , _UpperCamelCase : str , **_UpperCamelCase : int) ->List[str]: """simple docstring""" _lowerCamelCase : Dict = AutoTokenizer.from_pretrained(_UpperCamelCase , subfolder="""qformer_tokenizer""") _lowerCamelCase : str = cls._get_arguments_from_pretrained(_UpperCamelCase , **_UpperCamelCase) args.append(_UpperCamelCase) return cls(*_UpperCamelCase)
15
from math import log from scipy.constants import Boltzmann, physical_constants lowerCAmelCase : List[Any] =300 # TEMPERATURE (unit = K) def A__ ( __A , __A , __A , ): '''simple docstring''' if donor_conc <= 0: raise ValueError("""Donor concentration should be positive""" ) elif acceptor_conc <= 0: raise ValueError("""Acceptor concentration should be positive""" ) elif intrinsic_conc <= 0: raise ValueError("""Intrinsic concentration should be positive""" ) elif donor_conc <= intrinsic_conc: raise ValueError( """Donor concentration should be greater than intrinsic concentration""" ) elif acceptor_conc <= intrinsic_conc: raise ValueError( """Acceptor concentration should be greater than intrinsic concentration""" ) else: return ( Boltzmann * T * log((donor_conc * acceptor_conc) / intrinsic_conc**2 ) / physical_constants["electron volt"][0] ) if __name__ == "__main__": import doctest doctest.testmod()
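# Editor's note -- a hedged usage sketch for the built-in-potential helper above; not part
# of the dataset row. The function name is obfuscated to `A__` and the module-level
# temperature constant to `lowerCAmelCase`; the sketch assumes the constant is restored to
# `T = 300` so the body's reference to `T` resolves. Concentrations are illustrative,
# in carriers per cubic metre:
#
#   v_bi = A__(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10)
#   print(f"built-in potential: {v_bi:.2f} V")  # ~0.83 V at T = 300 K, since kT/q * ln(1e14) ~= 0.0259 * 32.2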
15
1
import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class __snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[str] , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int]=13 , _UpperCamelCase : str=7 , _UpperCamelCase : Any=True , _UpperCamelCase : Tuple=True , _UpperCamelCase : str=True , _UpperCamelCase : Optional[Any]=True , _UpperCamelCase : Dict=99 , _UpperCamelCase : Optional[int]=32 , _UpperCamelCase : Tuple=5 , _UpperCamelCase : Union[str, Any]=4 , _UpperCamelCase : str=37 , _UpperCamelCase : str="gelu" , _UpperCamelCase : List[str]=0.1 , _UpperCamelCase : List[Any]=0.1 , _UpperCamelCase : Dict=512 , _UpperCamelCase : List[Any]=16 , _UpperCamelCase : Optional[int]=2 , _UpperCamelCase : Tuple=0.0_2 , _UpperCamelCase : Tuple=4 , ) ->List[str]: """simple docstring""" _lowerCamelCase : Tuple = parent _lowerCamelCase : int = batch_size _lowerCamelCase : str = seq_length _lowerCamelCase : Optional[Any] = is_training _lowerCamelCase : Tuple = use_attention_mask _lowerCamelCase : Any = use_token_type_ids _lowerCamelCase : Optional[int] = use_labels _lowerCamelCase : Union[str, Any] = vocab_size _lowerCamelCase : Optional[Any] = hidden_size _lowerCamelCase : str = num_hidden_layers _lowerCamelCase : List[str] = num_attention_heads _lowerCamelCase : List[str] = intermediate_size _lowerCamelCase : Optional[Any] = hidden_act _lowerCamelCase : Dict = hidden_dropout_prob _lowerCamelCase : str = attention_probs_dropout_prob _lowerCamelCase : List[Any] = max_position_embeddings _lowerCamelCase : List[str] = type_vocab_size _lowerCamelCase : Tuple = type_sequence_label_size _lowerCamelCase : Optional[int] = initializer_range _lowerCamelCase : Dict = num_choices def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->str: """simple docstring""" _lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _lowerCamelCase : Any = None if self.use_attention_mask: _lowerCamelCase : int = random_attention_mask([self.batch_size, self.seq_length]) _lowerCamelCase : List[str] = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=_UpperCamelCase , ) return config, input_ids, attention_mask def _SCREAMING_SNAKE_CASE ( self : Any) ->List[str]: """simple docstring""" _lowerCamelCase : str = self.prepare_config_and_inputs() _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = config_and_inputs _lowerCamelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class __snake_case ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _snake_case = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, 
FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, ) if is_flax_available() else () ) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Optional[Any] = FlaxDistilBertModelTester(self) @slow def _SCREAMING_SNAKE_CASE ( self : Dict) ->List[str]: """simple docstring""" for model_class_name in self.all_model_classes: _lowerCamelCase : Tuple = model_class_name.from_pretrained("""distilbert-base-uncased""") _lowerCamelCase : List[str] = model(np.ones((1, 1))) self.assertIsNotNone(_UpperCamelCase) @require_flax class __snake_case ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Dict) ->List[Any]: """simple docstring""" _lowerCamelCase : Optional[Any] = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""") _lowerCamelCase : List[Any] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) _lowerCamelCase : Dict = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) _lowerCamelCase : Union[str, Any] = model(_UpperCamelCase , attention_mask=_UpperCamelCase)[0] _lowerCamelCase : Optional[Any] = (1, 11, 768) self.assertEqual(output.shape , _UpperCamelCase) _lowerCamelCase : Dict = np.array([[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]]) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _UpperCamelCase , atol=1E-4))
15
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def A__ ( __A ): '''simple docstring''' _lowerCamelCase : Tuple = {} _lowerCamelCase : List[Any] = tokenizer(example["""content"""] , truncation=__A )["""input_ids"""] _lowerCamelCase : Tuple = len(example["""content"""] ) / len(output["""input_ids"""] ) return output lowerCAmelCase : int =HfArgumentParser(PretokenizationArguments) lowerCAmelCase : int =parser.parse_args() if args.num_workers is None: lowerCAmelCase : Any =multiprocessing.cpu_count() lowerCAmelCase : Optional[Any] =AutoTokenizer.from_pretrained(args.tokenizer_dir) lowerCAmelCase : str =time.time() lowerCAmelCase : Union[str, Any] =load_dataset(args.dataset_name, split="train") print(F"""Dataset loaded in {time.time()-t_start:.2f}s""") lowerCAmelCase : Dict =time.time() lowerCAmelCase : Dict =ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ "repo_name", "path", "copies", "size", "content", "license", "hash", "line_mean", "line_max", "alpha_frac", "autogenerated", ], ) print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""") lowerCAmelCase : Tuple =time.time() ds.push_to_hub(args.tokenized_data_repo) print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
15
1
import numpy as np import torch from imwatermark import WatermarkEncoder # Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66 lowerCAmelCase : str =0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110 # bin(x)[2:] gives bits of x as str, use int to convert them to 0/1 lowerCAmelCase : str =[int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]] class __snake_case : '''simple docstring''' def __init__( self : List[str]) ->List[str]: """simple docstring""" _lowerCamelCase : Optional[int] = WATERMARK_BITS _lowerCamelCase : Any = WatermarkEncoder() self.encoder.set_watermark("""bits""" , self.watermark) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : torch.FloatTensor) ->Dict: """simple docstring""" if images.shape[-1] < 256: return images _lowerCamelCase : Dict = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1).float().numpy() _lowerCamelCase : str = [self.encoder.encode(_UpperCamelCase , """dwtDct""") for image in images] _lowerCamelCase : str = torch.from_numpy(np.array(_UpperCamelCase)).permute(0 , 3 , 1 , 2) _lowerCamelCase : Optional[int] = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0) return images
15
import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _snake_case = IFPipeline _snake_case = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'} _snake_case = TEXT_TO_IMAGE_BATCH_PARAMS _snake_case = PipelineTesterMixin.required_optional_params - {'latents'} def _SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]: """simple docstring""" return self._get_dummy_components() def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=0) ->Optional[Any]: """simple docstring""" if str(_UpperCamelCase).startswith("""mps"""): _lowerCamelCase : int = torch.manual_seed(_UpperCamelCase) else: _lowerCamelCase : List[Any] = torch.Generator(device=_UpperCamelCase).manual_seed(_UpperCamelCase) _lowerCamelCase : Dict = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""") def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1) def _SCREAMING_SNAKE_CASE ( self : int) ->Any: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2) def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]: """simple docstring""" self._test_save_load_local() def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3) @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]: """simple docstring""" _lowerCamelCase : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa) _lowerCamelCase : Tuple = IFSuperResolutionPipeline.from_pretrained( """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("""cuda""") _lowerCamelCase , _lowerCamelCase : str = 
pipe_a.encode_prompt("""anime turtle""" , device="""cuda""") del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() _lowerCamelCase : str = None _lowerCamelCase : str = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img _lowerCamelCase : Optional[Any] = IFImgaImgPipeline(**pipe_a.components) _lowerCamelCase : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if_imgaimg(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting _lowerCamelCase : Any = IFInpaintingPipeline(**pipe_a.components) _lowerCamelCase : Dict = IFInpaintingSuperResolutionPipeline(**pipe_a.components) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if_inpainting(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str) ->Tuple: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : Optional[int] = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Optional[Any] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Optional[int] = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 _lowerCamelCase : Dict = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : str = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : Any = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : str , _UpperCamelCase : List[Any]) ->Any: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Union[str, Any] = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Dict 
= pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Union[str, Any] = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _lowerCamelCase : List[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : List[str] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Optional[Any] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : List[Any] = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : str = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple) ->Optional[int]: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(1)).to(_UpperCamelCase) _lowerCamelCase : int = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Any = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Any = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _lowerCamelCase : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1)).to(_UpperCamelCase) _lowerCamelCase : List[str] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : Optional[Any] = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : Optional[Any] = 
torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def A__ ( ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
15
1
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase : Optional[Any] =logging.get_logger(__name__) lowerCAmelCase : List[str] ={ "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } lowerCAmelCase : List[str] ={ "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } lowerCAmelCase : Any ={"facebook/blenderbot-3B": 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def A__ ( ): '''simple docstring''' _lowerCamelCase : Optional[Any] = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) _lowerCamelCase : Union[str, Any] = bs[:] _lowerCamelCase : Optional[Any] = 0 for b in range(2**8 ): if b not in bs: bs.append(__A ) cs.append(2**8 + n ) n += 1 _lowerCamelCase : Optional[int] = [chr(__A ) for n in cs] return dict(zip(__A , __A ) ) def A__ ( __A ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = set() _lowerCamelCase : Tuple = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _lowerCamelCase : Tuple = char return pairs class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = VOCAB_FILES_NAMES _snake_case = PRETRAINED_VOCAB_FILES_MAP _snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case = ['input_ids', 'attention_mask'] def __init__( self : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Dict , _UpperCamelCase : int="replace" , _UpperCamelCase : Tuple="<s>" , _UpperCamelCase : Dict="</s>" , _UpperCamelCase : List[Any]="</s>" , _UpperCamelCase : Optional[Any]="<s>" , _UpperCamelCase : List[Any]="<unk>" , _UpperCamelCase : Any="<pad>" , _UpperCamelCase : Optional[Any]="<mask>" , _UpperCamelCase : Union[str, Any]=False , **_UpperCamelCase : str , ) ->List[Any]: """simple docstring""" _lowerCamelCase : Tuple = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else bos_token _lowerCamelCase : Union[str, Any] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else eos_token _lowerCamelCase : Optional[int] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else sep_token _lowerCamelCase : Union[str, Any] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else cls_token _lowerCamelCase : Tuple = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else unk_token _lowerCamelCase : str = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it _lowerCamelCase : Union[str, Any] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else mask_token super().__init__( errors=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , cls_token=_UpperCamelCase , pad_token=_UpperCamelCase , mask_token=_UpperCamelCase , add_prefix_space=_UpperCamelCase , **_UpperCamelCase , ) with open(_UpperCamelCase , encoding="""utf-8""") as vocab_handle: _lowerCamelCase : Any = json.load(_UpperCamelCase) _lowerCamelCase : Optional[int] = {v: k for k, v in self.encoder.items()} _lowerCamelCase : Optional[int] = errors # how to handle errors in decoding _lowerCamelCase : Dict = bytes_to_unicode() _lowerCamelCase : Optional[Any] = {v: k for k, v in self.byte_encoder.items()} with open(_UpperCamelCase , encoding="""utf-8""") as merges_handle: _lowerCamelCase : Optional[Any] = merges_handle.read().split("""\n""")[1:-1] _lowerCamelCase : Any = [tuple(merge.split()) for merge in bpe_merges] _lowerCamelCase : Any = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase)))) _lowerCamelCase : Tuple = {} _lowerCamelCase : int = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions _lowerCamelCase : Any = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _SCREAMING_SNAKE_CASE ( self : str) ->str: """simple docstring""" return len(self.encoder) def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : Tuple) ->Optional[Any]: """simple docstring""" if token in self.cache: return self.cache[token] _lowerCamelCase : List[str] = tuple(_UpperCamelCase) _lowerCamelCase : List[Any] = get_pairs(_UpperCamelCase) if not pairs: return token while True: _lowerCamelCase : List[Any] = min(_UpperCamelCase , key=lambda _UpperCamelCase: self.bpe_ranks.get(_UpperCamelCase , float("""inf"""))) if bigram not in self.bpe_ranks: break _lowerCamelCase , _lowerCamelCase : List[str] = bigram _lowerCamelCase : Dict = [] _lowerCamelCase : str = 0 while i < len(_UpperCamelCase): try: _lowerCamelCase : int = word.index(_UpperCamelCase , _UpperCamelCase) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) _lowerCamelCase : List[str] = j if word[i] == first and i < len(_UpperCamelCase) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 _lowerCamelCase : Any = tuple(_UpperCamelCase) _lowerCamelCase : List[str] = new_word if len(_UpperCamelCase) == 1: break else: _lowerCamelCase : Optional[Any] = get_pairs(_UpperCamelCase) _lowerCamelCase : Optional[Any] = """ """.join(_UpperCamelCase) _lowerCamelCase : List[Any] = word return word def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : Optional[Any]) ->str: """simple docstring""" _lowerCamelCase : int = [] for token in re.findall(self.pat , _UpperCamelCase): _lowerCamelCase : Tuple = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""")) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) 
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_UpperCamelCase).split(""" """)) return bpe_tokens def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Optional[int]) ->Optional[int]: """simple docstring""" return self.encoder.get(_UpperCamelCase , self.encoder.get(self.unk_token)) def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : int) ->Union[str, Any]: """simple docstring""" return self.decoder.get(_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : Any) ->List[Any]: """simple docstring""" _lowerCamelCase : str = """""".join(_UpperCamelCase) _lowerCamelCase : Tuple = bytearray([self.byte_decoder[c] for c in text]).decode("""utf-8""" , errors=self.errors) return text def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None) ->Tuple[str]: """simple docstring""" if not os.path.isdir(_UpperCamelCase): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""") return _lowerCamelCase : Union[str, Any] = os.path.join( _UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]) _lowerCamelCase : Dict = os.path.join( _UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""]) with open(_UpperCamelCase , """w""" , encoding="""utf-8""") as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCamelCase , ensure_ascii=_UpperCamelCase) + """\n""") _lowerCamelCase : Optional[Any] = 0 with open(_UpperCamelCase , """w""" , encoding="""utf-8""") as writer: writer.write("""#version: 0.2\n""") for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _UpperCamelCase: kv[1]): if index != token_index: logger.warning( F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" """ Please check that the tokenizer is not corrupted!""") _lowerCamelCase : Optional[Any] = token_index writer.write(""" """.join(_UpperCamelCase) + """\n""") index += 1 return vocab_file, merge_file def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False) ->List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase) if token_ids_a is None: return [1] + ([0] * len(_UpperCamelCase)) + [1] return [1] + ([0] * len(_UpperCamelCase)) + [1, 1] + ([0] * len(_UpperCamelCase)) + [1] def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None) ->List[int]: """simple docstring""" _lowerCamelCase : Union[str, Any] = [self.sep_token_id] _lowerCamelCase : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : str , _UpperCamelCase : Optional[Any]=False , **_UpperCamelCase : Optional[Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(_UpperCamelCase) > 0 and not text[0].isspace()): _lowerCamelCase : List[str] = """ """ + text return (text, kwargs) def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : List[int] , _UpperCamelCase : 
Optional[List[int]] = None) ->Any: """simple docstring""" return token_ids_a + [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : "Conversation") ->List[int]: """simple docstring""" _lowerCamelCase : str = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(""" """ + text) else: # Generated responses should contain them already. inputs.append(_UpperCamelCase) _lowerCamelCase : Any = """ """.join(_UpperCamelCase) _lowerCamelCase : Optional[Any] = self.encode(_UpperCamelCase) if len(_UpperCamelCase) > self.model_max_length: _lowerCamelCase : List[str] = input_ids[-self.model_max_length :] logger.warning(F"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""") return input_ids
15
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase : Any =logging.get_logger(__name__) lowerCAmelCase : List[Any] ={ "microsoft/swin-tiny-patch4-window7-224": ( "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json" ), # See all Swin models at https://huggingface.co/models?filter=swin } class __snake_case ( __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' _snake_case = 'swin' _snake_case = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : Optional[int] , _UpperCamelCase : List[str]=224 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : List[Any]=3 , _UpperCamelCase : Dict=96 , _UpperCamelCase : Any=[2, 2, 6, 2] , _UpperCamelCase : Any=[3, 6, 12, 24] , _UpperCamelCase : Tuple=7 , _UpperCamelCase : Tuple=4.0 , _UpperCamelCase : Dict=True , _UpperCamelCase : Tuple=0.0 , _UpperCamelCase : Any=0.0 , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : Any="gelu" , _UpperCamelCase : str=False , _UpperCamelCase : str=0.0_2 , _UpperCamelCase : Dict=1E-5 , _UpperCamelCase : List[str]=32 , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : List[Any]=None , **_UpperCamelCase : List[Any] , ) ->Tuple: """simple docstring""" super().__init__(**_UpperCamelCase) _lowerCamelCase : List[str] = image_size _lowerCamelCase : Tuple = patch_size _lowerCamelCase : Dict = num_channels _lowerCamelCase : Union[str, Any] = embed_dim _lowerCamelCase : str = depths _lowerCamelCase : str = len(_UpperCamelCase) _lowerCamelCase : Optional[Any] = num_heads _lowerCamelCase : Tuple = window_size _lowerCamelCase : int = mlp_ratio _lowerCamelCase : Optional[int] = qkv_bias _lowerCamelCase : List[str] = hidden_dropout_prob _lowerCamelCase : str = attention_probs_dropout_prob _lowerCamelCase : Tuple = drop_path_rate _lowerCamelCase : List[str] = hidden_act _lowerCamelCase : Dict = use_absolute_embeddings _lowerCamelCase : int = layer_norm_eps _lowerCamelCase : str = initializer_range _lowerCamelCase : Dict = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowerCamelCase : int = int(embed_dim * 2 ** (len(_UpperCamelCase) - 1)) _lowerCamelCase : Dict = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(_UpperCamelCase) + 1)] _lowerCamelCase , _lowerCamelCase : List[str] = get_aligned_output_features_output_indices( out_features=_UpperCamelCase , out_indices=_UpperCamelCase , stage_names=self.stage_names) class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = version.parse('1.11' ) @property def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ]) @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->float: """simple docstring""" return 1E-4
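# Editor's note -- a hedged sketch of the backbone bookkeeping computed in the config row
# above, assuming the obfuscated `__snake_case` config class maps back to `SwinConfig`
# from `transformers`; not part of the dataset row:
#
#   from transformers import SwinConfig
#   config = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2], out_features=["stage2", "stage4"])
#   print(config.hidden_size)   # 768 == int(96 * 2 ** (len(depths) - 1))
#   print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']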
15
1
import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase : List[Any] =get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model") @require_sentencepiece @require_tokenizers class __snake_case ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _snake_case = GPTSwaTokenizer _snake_case = False _snake_case = True _snake_case = False def _SCREAMING_SNAKE_CASE ( self : int) ->Union[str, Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing _lowerCamelCase : Dict = GPTSwaTokenizer(_UpperCamelCase , eos_token="""<unk>""" , bos_token="""<unk>""" , pad_token="""<unk>""") tokenizer.save_pretrained(self.tmpdirname) def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : List[str]) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : Optional[int] = """This is a test""" _lowerCamelCase : Any = """This is a test""" return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict: """simple docstring""" _lowerCamelCase : List[str] = """<s>""" _lowerCamelCase : str = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase) , _UpperCamelCase) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase) , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]: """simple docstring""" _lowerCamelCase : Tuple = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , """<unk>""") self.assertEqual(vocab_keys[1] , """<s>""") self.assertEqual(vocab_keys[-1] , """j""") self.assertEqual(len(_UpperCamelCase) , 2000) def _SCREAMING_SNAKE_CASE ( self : Dict) ->List[str]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 2000) def _SCREAMING_SNAKE_CASE ( self : Any) ->List[str]: """simple docstring""" _lowerCamelCase : Optional[Any] = GPTSwaTokenizer(_UpperCamelCase) _lowerCamelCase : Optional[int] = tokenizer.tokenize("""This is a test""") self.assertListEqual(_UpperCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase) , [465, 287, 265, 631, 842]) _lowerCamelCase : Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""") # fmt: off self.assertListEqual( _UpperCamelCase , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] , ) # fmt: on _lowerCamelCase : Any = tokenizer.convert_tokens_to_ids(_UpperCamelCase) self.assertListEqual( _UpperCamelCase , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , ) _lowerCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(_UpperCamelCase) # fmt: off self.assertListEqual( _UpperCamelCase , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""]) # fmt: on def _SCREAMING_SNAKE_CASE ( self : Tuple) ->str: """simple docstring""" _lowerCamelCase : List[str] = GPTSwaTokenizer(_UpperCamelCase) _lowerCamelCase : Optional[int] = ["""This is a test""", """I was born in 92000, and this is falsé."""] _lowerCamelCase : 
Optional[Any] = [ [465, 287, 265, 631, 842], [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(_UpperCamelCase , _UpperCamelCase): self.assertListEqual(tokenizer.encode_fast(_UpperCamelCase) , _UpperCamelCase) # Test that decode_fast returns the input text for text, token_ids in zip(_UpperCamelCase , _UpperCamelCase): self.assertEqual(tokenizer.decode_fast(_UpperCamelCase) , _UpperCamelCase) @slow def _SCREAMING_SNAKE_CASE ( self : Any) ->Tuple: """simple docstring""" _lowerCamelCase : Dict = [ """<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""", """Hey there, how are you doing this fine day?""", """This is a text with a trailing spaces followed by a dot .""", """Häj sväjs lillebrör! =)""", """Det är inget fel på Mr. Cool""", ] # fmt: off _lowerCamelCase : List[str] = {"""input_ids""": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=_UpperCamelCase , model_name="""AI-Sweden/gpt-sw3-126m""" , sequences=_UpperCamelCase , )
15
import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = (EulerDiscreteScheduler,) _snake_case = 10 def _SCREAMING_SNAKE_CASE ( self : Tuple , **_UpperCamelCase : Optional[Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Optional[int] = { """num_train_timesteps""": 1100, """beta_start""": 0.0_0_0_1, """beta_end""": 0.0_2, """beta_schedule""": """linear""", } config.update(**_UpperCamelCase) return config def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict: """simple docstring""" for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]): self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : List[Any] = self.scheduler_classes[0] _lowerCamelCase : str = self.get_scheduler_config() _lowerCamelCase : Any = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps) _lowerCamelCase : str = torch.manual_seed(0) _lowerCamelCase : str = self.dummy_model() _lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma _lowerCamelCase : int = sample.to(_UpperCamelCase) for i, t in enumerate(scheduler.timesteps): _lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : str = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : Dict = output.prev_sample _lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : Any = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2 assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any: """simple docstring""" _lowerCamelCase : int = self.scheduler_classes[0] _lowerCamelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""") _lowerCamelCase : int = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps) _lowerCamelCase : Any = torch.manual_seed(0) _lowerCamelCase : int = self.dummy_model() _lowerCamelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma _lowerCamelCase : Dict = sample.to(_UpperCamelCase) for i, t in enumerate(scheduler.timesteps): _lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : str = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[Any] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : Tuple = output.prev_sample _lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase 
: Optional[int] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 0.0_0_0_2) < 1E-2 assert abs(result_mean.item() - 2.2_6_7_6E-0_6) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.scheduler_classes[0] _lowerCamelCase : int = self.get_scheduler_config() _lowerCamelCase : List[Any] = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase) _lowerCamelCase : Optional[Any] = torch.manual_seed(0) _lowerCamelCase : Tuple = self.dummy_model() _lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _lowerCamelCase : Tuple = sample.to(_UpperCamelCase) for t in scheduler.timesteps: _lowerCamelCase : List[Any] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Any = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : List[Any] = output.prev_sample _lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : List[Any] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2 assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple: """simple docstring""" _lowerCamelCase : List[str] = self.scheduler_classes[0] _lowerCamelCase : Optional[int] = self.get_scheduler_config() _lowerCamelCase : int = scheduler_class(**_UpperCamelCase , use_karras_sigmas=_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase) _lowerCamelCase : int = torch.manual_seed(0) _lowerCamelCase : Tuple = self.dummy_model() _lowerCamelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _lowerCamelCase : Optional[int] = sample.to(_UpperCamelCase) for t in scheduler.timesteps: _lowerCamelCase : Tuple = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Any = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : int = output.prev_sample _lowerCamelCase : Tuple = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : List[str] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9) < 1E-2 assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3) < 1E-3
15
1
# This is the module that test_patching.py uses to test patch_submodule()

import os  # noqa: this is just for tests
import os as renamed_os  # noqa: this is just for tests
from os import path  # noqa: this is just for tests
from os import path as renamed_path  # noqa: this is just for tests
from os.path import join  # noqa: this is just for tests
from os.path import join as renamed_join  # noqa: this is just for tests


open = open  # noqa: we just need to have a builtin inside this module to test it properly
15
import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase : Dict =logging.get_logger(__name__) lowerCAmelCase : Dict ={"vocab_file": "vocab.json"} lowerCAmelCase : List[str] ={ "vocab_file": { "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json", } } lowerCAmelCase : int ={"mgp-str": 27} class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = VOCAB_FILES_NAMES _snake_case = PRETRAINED_VOCAB_FILES_MAP _snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : int="[GO]" , _UpperCamelCase : Any="[GO]" , _UpperCamelCase : Optional[Any]="[s]" , _UpperCamelCase : List[str]="[GO]" , **_UpperCamelCase : Dict) ->Union[str, Any]: """simple docstring""" super().__init__( unk_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , pad_token=_UpperCamelCase , **_UpperCamelCase , ) with open(_UpperCamelCase , encoding="""utf-8""") as vocab_handle: _lowerCamelCase : Optional[Any] = json.load(_UpperCamelCase) _lowerCamelCase : Optional[Any] = {v: k for k, v in self.vocab.items()} @property def _SCREAMING_SNAKE_CASE ( self : str) ->Any: """simple docstring""" return len(self.vocab) def _SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]: """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Union[str, Any]) ->Any: """simple docstring""" _lowerCamelCase : Tuple = [] for s in text: char_tokens.extend(_UpperCamelCase) return char_tokens def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : int) ->Optional[int]: """simple docstring""" return self.vocab.get(_UpperCamelCase , self.vocab.get(self.unk_token)) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[Any]) ->Dict: """simple docstring""" return self.decoder.get(_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None) ->Tuple[str]: """simple docstring""" if not os.path.isdir(_UpperCamelCase): logger.error("""Vocabulary path ({}) should be a directory""".format(_UpperCamelCase)) return _lowerCamelCase : Tuple = os.path.join( _UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]) with open(_UpperCamelCase , """w""" , encoding="""utf-8""") as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=_UpperCamelCase , ensure_ascii=_UpperCamelCase) + """\n""") return (vocab_file,)
15
1
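A hedged usage sketch for the character tokenizer defined above; the checkpoint id comes from the file's own pretrained map, and the exact ids depend on the hosted vocab.

# Illustrative only: round-trip a string through MgpstrTokenizer
# (assumes transformers is installed and the hub checkpoint is reachable).
from transformers import MgpstrTokenizer

tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
ids = tokenizer("hello")["input_ids"]  # one id per character
chars = tokenizer.convert_ids_to_tokens(ids)
print(ids, "".join(chars))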
from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class __snake_case ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Dict , _UpperCamelCase : NestedDataStructureLike[PathLike] , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Optional[Features] = None , _UpperCamelCase : str = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : Optional[int] = None , **_UpperCamelCase : Tuple , ) ->Union[str, Any]: """simple docstring""" super().__init__( _UpperCamelCase , split=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase , streaming=_UpperCamelCase , num_proc=_UpperCamelCase , **_UpperCamelCase , ) _lowerCamelCase : List[Any] = path_or_paths if isinstance(_UpperCamelCase , _UpperCamelCase) else {self.split: path_or_paths} _lowerCamelCase : Any = Text( cache_dir=_UpperCamelCase , data_files=_UpperCamelCase , features=_UpperCamelCase , **_UpperCamelCase , ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]: """simple docstring""" if self.streaming: _lowerCamelCase : Tuple = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: _lowerCamelCase : List[Any] = None _lowerCamelCase : Any = None _lowerCamelCase : List[str] = None _lowerCamelCase : Dict = None self.builder.download_and_prepare( download_config=_UpperCamelCase , download_mode=_UpperCamelCase , verification_mode=_UpperCamelCase , base_path=_UpperCamelCase , num_proc=self.num_proc , ) _lowerCamelCase : Optional[int] = self.builder.as_dataset( split=self.split , verification_mode=_UpperCamelCase , in_memory=self.keep_in_memory) return dataset
15
import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Tuple = ["""a""", """b""", """c"""] # Defaults to last layer if both are None _lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) self.assertEqual(_UpperCamelCase , ["""c"""]) self.assertEqual(_UpperCamelCase , [2]) # Out indices set to match out features _lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(["""a""", """c"""] , _UpperCamelCase , _UpperCamelCase) self.assertEqual(_UpperCamelCase , ["""a""", """c"""]) self.assertEqual(_UpperCamelCase , [0, 2]) # Out features set to match out indices _lowerCamelCase , _lowerCamelCase : Tuple = get_aligned_output_features_output_indices(_UpperCamelCase , [0, 2] , _UpperCamelCase) self.assertEqual(_UpperCamelCase , ["""a""", """c"""]) self.assertEqual(_UpperCamelCase , [0, 2]) # Out features selected from negative indices _lowerCamelCase , _lowerCamelCase : str = get_aligned_output_features_output_indices(_UpperCamelCase , [-3, -1] , _UpperCamelCase) self.assertEqual(_UpperCamelCase , ["""a""", """c"""]) self.assertEqual(_UpperCamelCase , [-3, -1]) def _SCREAMING_SNAKE_CASE ( self : int) ->int: """simple docstring""" with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , _UpperCamelCase) # Out features must be a list with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""]) # Out features must be a subset of stage names with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""]) # Out indices must be a list or tuple with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(_UpperCamelCase , 0 , ["""a""", """b"""]) # Out indices must be a subset of stage names with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(_UpperCamelCase , (0, 1) , ["""a"""]) # Out features and out indices must be the same length with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""]) # Out features should match out indices with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""]) # Out features and out indices should be in order with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""]) # Check passes with valid inputs verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""]) def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]: """simple docstring""" _lowerCamelCase : int = BackboneMixin() _lowerCamelCase : Union[str, Any] = ["""a""", """b""", """c"""] _lowerCamelCase : Tuple = ["""a""", """c"""] _lowerCamelCase : List[Any] = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ["""a""", """c"""]) self.assertEqual(backbone.out_indices , [0, 2]) # Check out features and indices are updated correctly _lowerCamelCase : str = ["""a""", """b"""] self.assertEqual(backbone.out_features , ["""a""", """b"""]) 
self.assertEqual(backbone.out_indices , [0, 1]) _lowerCamelCase : Optional[int] = [-3, -1] self.assertEqual(backbone.out_features , ["""a""", """c"""]) self.assertEqual(backbone.out_indices , [-3, -1])
15
1
from ...configuration_utils import PretrainedConfig lowerCAmelCase : str ={ "google/tapas-base-finetuned-sqa": ( "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json" ), "google/tapas-base-finetuned-wtq": ( "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json" ), "google/tapas-base-finetuned-wikisql-supervised": ( "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json" ), "google/tapas-base-finetuned-tabfact": ( "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json" ), } class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = 'tapas' def __init__( self : Optional[Any] , _UpperCamelCase : Dict=3_0522 , _UpperCamelCase : int=768 , _UpperCamelCase : int=12 , _UpperCamelCase : List[Any]=12 , _UpperCamelCase : List[Any]=3072 , _UpperCamelCase : int="gelu" , _UpperCamelCase : Tuple=0.1 , _UpperCamelCase : Union[str, Any]=0.1 , _UpperCamelCase : Tuple=1024 , _UpperCamelCase : str=[3, 256, 256, 2, 256, 256, 10] , _UpperCamelCase : Tuple=0.0_2 , _UpperCamelCase : Union[str, Any]=1E-1_2 , _UpperCamelCase : Dict=0 , _UpperCamelCase : int=1_0.0 , _UpperCamelCase : Dict=0 , _UpperCamelCase : List[Any]=1.0 , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : str=1.0 , _UpperCamelCase : Union[str, Any]=False , _UpperCamelCase : Any=None , _UpperCamelCase : Optional[Any]=1.0 , _UpperCamelCase : Any=1.0 , _UpperCamelCase : Tuple=False , _UpperCamelCase : List[Any]=False , _UpperCamelCase : Any="ratio" , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : int=None , _UpperCamelCase : Dict=64 , _UpperCamelCase : int=32 , _UpperCamelCase : List[Any]=False , _UpperCamelCase : List[str]=True , _UpperCamelCase : int=False , _UpperCamelCase : Dict=False , _UpperCamelCase : List[str]=True , _UpperCamelCase : List[str]=False , _UpperCamelCase : str=None , _UpperCamelCase : List[Any]=None , **_UpperCamelCase : int , ) ->Optional[Any]: """simple docstring""" super().__init__(pad_token_id=_UpperCamelCase , **_UpperCamelCase) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) _lowerCamelCase : List[str] = vocab_size _lowerCamelCase : Any = hidden_size _lowerCamelCase : Dict = num_hidden_layers _lowerCamelCase : Any = num_attention_heads _lowerCamelCase : Union[str, Any] = hidden_act _lowerCamelCase : int = intermediate_size _lowerCamelCase : Tuple = hidden_dropout_prob _lowerCamelCase : str = attention_probs_dropout_prob _lowerCamelCase : Union[str, Any] = max_position_embeddings _lowerCamelCase : Optional[Any] = type_vocab_sizes _lowerCamelCase : int = initializer_range _lowerCamelCase : Union[str, Any] = layer_norm_eps # Fine-tuning task hyperparameters _lowerCamelCase : List[Any] = positive_label_weight _lowerCamelCase : int = num_aggregation_labels _lowerCamelCase : Any = aggregation_loss_weight _lowerCamelCase : str = use_answer_as_supervision _lowerCamelCase : int = answer_loss_importance _lowerCamelCase : List[Any] = use_normalized_answer_loss _lowerCamelCase : int = huber_loss_delta _lowerCamelCase : Dict = temperature _lowerCamelCase : List[str] = aggregation_temperature _lowerCamelCase : Tuple = use_gumbel_for_cells _lowerCamelCase : Optional[int] = use_gumbel_for_aggregation _lowerCamelCase : Optional[int] = average_approximation_function _lowerCamelCase : Optional[Any] = cell_selection_preference _lowerCamelCase : Optional[Any] = answer_loss_cutoff _lowerCamelCase : List[str] = max_num_rows _lowerCamelCase : 
Optional[int] = max_num_columns _lowerCamelCase : List[Any] = average_logits_per_cell _lowerCamelCase : Dict = select_one_column _lowerCamelCase : Optional[int] = allow_empty_column_selection _lowerCamelCase : List[Any] = init_cell_selection_weights_to_zero _lowerCamelCase : List[Any] = reset_position_index_per_cell _lowerCamelCase : Tuple = disable_per_token_loss # Aggregation hyperparameters _lowerCamelCase : Optional[int] = aggregation_labels _lowerCamelCase : Any = no_aggregation_label_index if isinstance(self.aggregation_labels , _UpperCamelCase): _lowerCamelCase : int = {int(_UpperCamelCase): v for k, v in aggregation_labels.items()}
15
import math


def is_prime(number: int) -> bool:
    """Return True if 'number' is prime, using trial division by odd numbers."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the next prime at or above factor * value (searching down if desc=True)."""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
15
1
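A quick check of the primality helpers above; all expected values are verifiable by hand.

# Illustrative only: exercising is_prime / next_prime from the module above.
assert is_prime(2) and is_prime(97)
assert not is_prime(1) and not is_prime(100)
print(next_prime(14))             # -> 17, the next prime after 14 (15 and 16 are composite)
print(next_prime(13))             # -> 17, a prime input recurses to the prime after it
print(next_prime(14, desc=True))  # -> 13, searching downward instead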
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__) def A__ ( __A , __A , __A , __A ): '''simple docstring''' def constraint_to_multiple_of(__A , __A , __A=0 , __A=None ): _lowerCamelCase : Optional[int] = round(val / multiple ) * multiple if max_val is not None and x > max_val: _lowerCamelCase : Optional[int] = math.floor(val / multiple ) * multiple if x < min_val: _lowerCamelCase : Union[str, Any] = math.ceil(val / multiple ) * multiple return x _lowerCamelCase : str = (output_size, output_size) if isinstance(__A , __A ) else output_size _lowerCamelCase , _lowerCamelCase : Optional[int] = get_image_size(__A ) _lowerCamelCase , _lowerCamelCase : int = output_size # determine new height and width _lowerCamelCase : Dict = output_height / input_height _lowerCamelCase : Union[str, Any] = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width _lowerCamelCase : Optional[int] = scale_width else: # fit height _lowerCamelCase : List[Any] = scale_height _lowerCamelCase : str = constraint_to_multiple_of(scale_height * input_height , multiple=__A ) _lowerCamelCase : Tuple = constraint_to_multiple_of(scale_width * input_width , multiple=__A ) return (new_height, new_width) class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = ['pixel_values'] def __init__( self : int , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCamelCase : bool = False , _UpperCamelCase : int = 1 , _UpperCamelCase : bool = True , _UpperCamelCase : Union[int, float] = 1 / 255 , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , **_UpperCamelCase : List[str] , ) ->None: """simple docstring""" super().__init__(**_UpperCamelCase) _lowerCamelCase : Dict = size if size is not None else {"""height""": 384, """width""": 384} _lowerCamelCase : Any = get_size_dict(_UpperCamelCase) _lowerCamelCase : Optional[int] = do_resize _lowerCamelCase : Any = size _lowerCamelCase : List[Any] = keep_aspect_ratio _lowerCamelCase : Optional[Any] = ensure_multiple_of _lowerCamelCase : Dict = resample _lowerCamelCase : Any = do_rescale _lowerCamelCase : Any = rescale_factor _lowerCamelCase : List[Any] = do_normalize _lowerCamelCase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowerCamelCase : List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : bool = False , _UpperCamelCase : int = 1 , _UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , 
**_UpperCamelCase : List[str] , ) ->np.ndarray: """simple docstring""" _lowerCamelCase : List[Any] = get_size_dict(_UpperCamelCase) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""") _lowerCamelCase : List[Any] = get_resize_output_image_size( _UpperCamelCase , output_size=(size["""height"""], size["""width"""]) , keep_aspect_ratio=_UpperCamelCase , multiple=_UpperCamelCase , ) return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[int, float] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Optional[int] , ) ->int: """simple docstring""" return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : int , ) ->np.ndarray: """simple docstring""" return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : ImageInput , _UpperCamelCase : bool = None , _UpperCamelCase : int = None , _UpperCamelCase : bool = None , _UpperCamelCase : int = None , _UpperCamelCase : PILImageResampling = None , _UpperCamelCase : bool = None , _UpperCamelCase : float = None , _UpperCamelCase : bool = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , _UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCamelCase : Dict , ) ->PIL.Image.Image: """simple docstring""" _lowerCamelCase : Tuple = do_resize if do_resize is not None else self.do_resize _lowerCamelCase : Tuple = size if size is not None else self.size _lowerCamelCase : Any = get_size_dict(_UpperCamelCase) _lowerCamelCase : List[str] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio _lowerCamelCase : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of _lowerCamelCase : int = resample if resample is not None else self.resample _lowerCamelCase : Tuple = do_rescale if do_rescale is not None else self.do_rescale _lowerCamelCase : str = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCamelCase : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize _lowerCamelCase : Union[str, Any] = image_mean if image_mean is not None else self.image_mean _lowerCamelCase : Union[str, Any] = image_std if image_std is not None else self.image_std _lowerCamelCase : str = make_list_of_images(_UpperCamelCase) if not valid_images(_UpperCamelCase): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""") if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""") if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""") if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""") # All transformations expect numpy arrays. _lowerCamelCase : Union[str, Any] = [to_numpy_array(_UpperCamelCase) for image in images] if do_resize: _lowerCamelCase : Union[str, Any] = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase) for image in images] if do_rescale: _lowerCamelCase : Any = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase) for image in images] if do_normalize: _lowerCamelCase : List[str] = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase) for image in images] _lowerCamelCase : int = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase) for image in images] _lowerCamelCase : str = {"""pixel_values""": images} return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Tuple , _UpperCamelCase : List[Tuple] = None) ->List[str]: """simple docstring""" _lowerCamelCase : Any = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(_UpperCamelCase) != len(_UpperCamelCase): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""") if is_torch_tensor(_UpperCamelCase): _lowerCamelCase : Any = target_sizes.numpy() _lowerCamelCase : Optional[Any] = [] for idx in range(len(_UpperCamelCase)): _lowerCamelCase : Union[str, Any] = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=_UpperCamelCase) _lowerCamelCase : Union[str, Any] = resized_logits[0].argmax(dim=0) semantic_segmentation.append(_UpperCamelCase) else: _lowerCamelCase : Optional[int] = logits.argmax(dim=1) _lowerCamelCase : List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation
15
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING lowerCAmelCase : Optional[Any] =logging.get_logger(__name__) @add_end_docstrings(__lowerCAmelCase ) class __snake_case ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : str , *_UpperCamelCase : int , **_UpperCamelCase : List[str]) ->Tuple: """simple docstring""" super().__init__(*_UpperCamelCase , **_UpperCamelCase) requires_backends(self , """vision""") self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING) def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : List[str]=None) ->Optional[int]: """simple docstring""" _lowerCamelCase : Optional[int] = {} if top_k is not None: _lowerCamelCase : str = top_k return {}, {}, postprocess_params def __call__( self : Optional[int] , _UpperCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_UpperCamelCase : Optional[int]) ->Dict: """simple docstring""" return super().__call__(_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[int]) ->str: """simple docstring""" _lowerCamelCase : Tuple = load_image(_UpperCamelCase) _lowerCamelCase : Any = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework) return model_inputs def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : Union[str, Any]) ->List[str]: """simple docstring""" _lowerCamelCase : Any = self.model(**_UpperCamelCase) return model_outputs def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : List[str]=5) ->str: """simple docstring""" if top_k > self.model.config.num_labels: _lowerCamelCase : Union[str, Any] = self.model.config.num_labels if self.framework == "pt": _lowerCamelCase : Optional[Any] = model_outputs.logits.softmax(-1)[0] _lowerCamelCase , _lowerCamelCase : Dict = probs.topk(_UpperCamelCase) elif self.framework == "tf": _lowerCamelCase : List[Any] = stable_softmax(model_outputs.logits , axis=-1)[0] _lowerCamelCase : List[Any] = tf.math.top_k(_UpperCamelCase , k=_UpperCamelCase) _lowerCamelCase , _lowerCamelCase : str = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(F"""Unsupported framework: {self.framework}""") _lowerCamelCase : str = scores.tolist() _lowerCamelCase : str = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase)]
15
1
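A hedged usage sketch for the image-classification pipeline above; the checkpoint name and image URL are illustrative stand-ins.

# Illustrative only: top-k classification through the high-level pipeline API.
from transformers import pipeline

classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
preds = classifier("http://images.cocodataset.org/val2017/000000039769.jpg", top_k=3)
for p in preds:
    print(f"{p['label']}: {p['score']:.3f}")  # label/score dicts, highest score first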
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """
    Change the brightness of a PIL Image to a given level.
    """

    def brightness(c: int) -> float:
        """Fundamental transformation applied to every pixel value."""
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
15
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class __snake_case ( unittest.TestCase ): '''simple docstring''' _snake_case = ViTImageProcessor if is_vision_available() else None @property def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = (3, 32, 128) _lowerCamelCase : str = tempfile.mkdtemp() # fmt: off _lowerCamelCase : Dict = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on _lowerCamelCase : str = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase)))) _lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp: fp.write(json.dumps(_UpperCamelCase) + """\n""") _lowerCamelCase : Any = { """do_normalize""": False, """do_resize""": True, """image_processor_type""": """ViTImageProcessor""", """resample""": 3, """size""": {"""height""": 32, """width""": 128}, } _lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , _UpperCamelCase) with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp: json.dump(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **_UpperCamelCase : Any) ->Tuple: """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Dict , **_UpperCamelCase : Optional[Any]) ->List[Any]: """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: """simple docstring""" shutil.rmtree(self.tmpdirname) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: """simple docstring""" _lowerCamelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta) _lowerCamelCase : int = Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1)) return image_input def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" _lowerCamelCase : List[str] = self.get_tokenizer() _lowerCamelCase : Tuple = self.get_image_processor() _lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) processor.save_pretrained(self.tmpdirname) _lowerCamelCase : int = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCamelCase) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab()) self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string()) 
self.assertIsInstance(processor.image_processor , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict: """simple docstring""" _lowerCamelCase : Dict = self.get_tokenizer() _lowerCamelCase : Optional[Any] = self.get_image_processor() _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) processor.save_pretrained(self.tmpdirname) _lowerCamelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""") _lowerCamelCase : Union[str, Any] = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0) _lowerCamelCase : Tuple = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCamelCase , padding_value=1.0) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any) ->int: """simple docstring""" _lowerCamelCase : int = self.get_image_processor() _lowerCamelCase : int = self.get_tokenizer() _lowerCamelCase : List[str] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : List[str] = self.prepare_image_inputs() _lowerCamelCase : Optional[int] = image_processor(_UpperCamelCase , return_tensors="""np""") _lowerCamelCase : int = processor(images=_UpperCamelCase , return_tensors="""np""") for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]: """simple docstring""" _lowerCamelCase : List[Any] = self.get_image_processor() _lowerCamelCase : int = self.get_tokenizer() _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : Optional[int] = """test""" _lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase) _lowerCamelCase : Dict = tokenizer(_UpperCamelCase) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.get_image_processor() _lowerCamelCase : List[Any] = self.get_tokenizer() _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : Any = """test""" _lowerCamelCase : List[str] = self.prepare_image_inputs() _lowerCamelCase : int = processor(text=_UpperCamelCase , images=_UpperCamelCase) self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""]) # test if it raises when no input is passed with pytest.raises(_UpperCamelCase): processor() def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.get_image_processor() _lowerCamelCase : List[str] = self.get_tokenizer() _lowerCamelCase : Dict = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] _lowerCamelCase : Any = processor.char_decode(_UpperCamelCase) _lowerCamelCase : Tuple = tokenizer.batch_decode(_UpperCamelCase) _lowerCamelCase : List[str] = [seq.replace(""" """ , """""") for seq in decoded_tok] 
self.assertListEqual(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str: """simple docstring""" _lowerCamelCase : Dict = self.get_image_processor() _lowerCamelCase : str = self.get_tokenizer() _lowerCamelCase : List[Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : int = None _lowerCamelCase : Union[str, Any] = self.prepare_image_inputs() _lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase , images=_UpperCamelCase) self.assertListEqual(list(inputs.keys()) , processor.model_input_names) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : List[str] = self.get_image_processor() _lowerCamelCase : int = self.get_tokenizer() _lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : Any = torch.randn(1 , 27 , 38) _lowerCamelCase : List[Any] = torch.randn(1 , 27 , 5_0257) _lowerCamelCase : List[str] = torch.randn(1 , 27 , 3_0522) _lowerCamelCase : int = processor.batch_decode([char_input, bpe_input, wp_input]) self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
15
1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging lowerCAmelCase : Any =logging.get_logger(__name__) class __snake_case ( _lowerCamelCase ): '''simple docstring''' _snake_case = ['pixel_values'] def __init__( self : Optional[int] , _UpperCamelCase : Dict = True , _UpperCamelCase : str = None , _UpperCamelCase : Optional[int] = PILImageResampling.BILINEAR , _UpperCamelCase : Dict = True , _UpperCamelCase : str = None , _UpperCamelCase : str = True , _UpperCamelCase : str = 1 / 255 , _UpperCamelCase : List[str] = True , _UpperCamelCase : Any = None , _UpperCamelCase : List[Any] = None , **_UpperCamelCase : List[Any] , ) ->None: """simple docstring""" super().__init__(**A__) _lowerCamelCase : Any = size if size is not None else {"""shortest_edge""": 256} _lowerCamelCase : List[str] = get_size_dict(A__ , default_to_square=A__) _lowerCamelCase : Optional[int] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} _lowerCamelCase : Union[str, Any] = get_size_dict(A__) _lowerCamelCase : Tuple = do_resize _lowerCamelCase : List[str] = size _lowerCamelCase : Union[str, Any] = resample _lowerCamelCase : Union[str, Any] = do_center_crop _lowerCamelCase : Union[str, Any] = crop_size _lowerCamelCase : str = do_rescale _lowerCamelCase : str = rescale_factor _lowerCamelCase : Tuple = do_normalize _lowerCamelCase : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowerCamelCase : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[Any] = PILImageResampling.BICUBIC , _UpperCamelCase : Union[str, Any] = None , **_UpperCamelCase : List[str] , ) ->np.ndarray: """simple docstring""" _lowerCamelCase : Any = get_size_dict(A__ , default_to_square=A__) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}""") _lowerCamelCase : Tuple = get_resize_output_image_size(A__ , size=size["""shortest_edge"""] , default_to_square=A__) return resize(A__ , size=A__ , resample=A__ , data_format=A__ , **A__) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict = None , **_UpperCamelCase : Optional[Any] , ) ->np.ndarray: """simple docstring""" _lowerCamelCase : int = get_size_dict(A__) return center_crop(A__ , size=(size["""height"""], size["""width"""]) , data_format=A__ , **A__) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : int , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] = None , **_UpperCamelCase : Any) ->np.ndarray: """simple docstring""" return rescale(A__ , scale=A__ , data_format=A__ , **A__) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any , _UpperCamelCase : Any = None , **_UpperCamelCase : Optional[int] , ) ->np.ndarray: """simple docstring""" return normalize(A__ , mean=A__ , std=A__ , data_format=A__ , **A__) def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Any , _UpperCamelCase : List[str] = None , _UpperCamelCase : Tuple = None , _UpperCamelCase : Optional[Any] = None , _UpperCamelCase : int = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[Any] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : List[str] = None , _UpperCamelCase : List[Any] = None , _UpperCamelCase : str = None , _UpperCamelCase : Union[str, Any] = None , _UpperCamelCase : Optional[Any] = ChannelDimension.FIRST , **_UpperCamelCase : Dict , ) ->Tuple: """simple docstring""" _lowerCamelCase : Optional[Any] = do_resize if do_resize is not None else self.do_resize _lowerCamelCase : Union[str, Any] = size if size is not None else self.size _lowerCamelCase : List[Any] = get_size_dict(A__ , default_to_square=A__) _lowerCamelCase : Tuple = resample if resample is not None else self.resample _lowerCamelCase : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop _lowerCamelCase : Dict = crop_size if crop_size is not None else self.crop_size _lowerCamelCase : Optional[int] = get_size_dict(A__) _lowerCamelCase : List[str] = do_rescale if do_rescale is not None else self.do_rescale _lowerCamelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCamelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize _lowerCamelCase : Tuple = image_mean if image_mean is not None else self.image_mean _lowerCamelCase : Tuple = image_std if image_std is not None else self.image_std _lowerCamelCase : Optional[Any] = make_list_of_images(A__) if not valid_images(A__): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""") if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""") if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""") if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""") if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""") # All transformations expect numpy arrays. 
_lowerCamelCase : Optional[Any] = [to_numpy_array(A__) for image in images] if do_resize: _lowerCamelCase : Dict = [self.resize(image=A__ , size=A__ , resample=A__) for image in images] if do_center_crop: _lowerCamelCase : Tuple = [self.center_crop(image=A__ , size=A__) for image in images] if do_rescale: _lowerCamelCase : Any = [self.rescale(image=A__ , scale=A__) for image in images] if do_normalize: _lowerCamelCase : Optional[int] = [self.normalize(image=A__ , mean=A__ , std=A__) for image in images] _lowerCamelCase : Optional[int] = [to_channel_dimension_format(A__ , A__) for image in images] _lowerCamelCase : Dict = {"""pixel_values""": images} return BatchFeature(data=A__ , tensor_type=A__)
700
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """Parse the command line options for the TPU launch helper."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
15
0
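A hedged sketch of how the TPU launcher above is typically driven; run_glue.py is a placeholder for any training script that defines _mp_fn(index).

# Illustrative only: equivalent to `python xla_spawn.py --num_cores 8 run_glue.py --do_train`.
import subprocess

subprocess.run(
    ["python", "xla_spawn.py", "--num_cores", "8", "run_glue.py", "--do_train"],
    check=True,
)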
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"

    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
701
def naive_cut_rod_recursive(n: int, prices: list):
    """Exponential-time, naive recursive solution to the rod-cutting problem."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revue


def top_down_cut_rod(n: int, prices: list):
    """Top-down dynamic programming (memoized) solution."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Bottom-up dynamic programming solution."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
15
0
from graphs.minimum_spanning_tree_kruskal import kruskal


def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(expected) == sorted(result)
702
from __future__ import annotations


class XORCipher:
    def __init__(self, key: int = 0):
        """Simple constructor that receives a key or uses default key = 0."""
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """XOR every character of 'content' with 'key'; returns a list of chars."""
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        """Inverse of encrypt (XOR is its own inverse); returns a list of chars."""
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        """XOR every character of 'content' with 'key'; returns a string."""
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        """Inverse of encrypt_string; returns the decrypted string."""
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        """Encrypt 'file' line by line into 'encrypt.out'; returns True on success."""
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))

        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        """Decrypt 'file' line by line into 'decrypt.out'; returns True on success."""
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))

        except OSError:
            return False

        return True


# Tests
# crypt = XORCipher()
# key = 67

# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))

# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))

# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))

# if (crypt.encrypt_file("test.txt",key)):
#     print("encrypt successful")
# else:
#     print("encrypt unsuccessful")

# if (crypt.decrypt_file("encrypt.out",key)):
#     print("decrypt successful")
# else:
#     print("decrypt unsuccessful")
15
0
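A live round-trip check for the cipher above, mirroring the commented tests at the end of that file.

# Illustrative only: XOR is an involution, so encrypting twice restores the input.
crypt = XORCipher()
key = 67
secret = crypt.encrypt_string("hallo welt", key)
assert crypt.decrypt_string(secret, key) == "hallo welt"
print("round-trip ok:", repr(secret))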
import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class __snake_case ( _SCREAMING_SNAKE_CASE ): '''simple docstring''' _snake_case = 0 _snake_case = False _snake_case = 3.0 class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]: """simple docstring""" self.assertDictEqual(MockClass().to_kwargs() , {}) self.assertDictEqual(MockClass(a=2).to_kwargs() , {"""a""": 2}) self.assertDictEqual(MockClass(a=2 , b=A_).to_kwargs() , {"""a""": 2, """b""": True}) self.assertDictEqual(MockClass(a=2 , c=2.2_5).to_kwargs() , {"""a""": 2, """c""": 2.2_5}) @require_cuda def _SCREAMING_SNAKE_CASE ( self : Dict) ->int: """simple docstring""" _lowerCamelCase : Optional[int] = GradScalerKwargs(init_scale=1024 , growth_factor=2) AcceleratorState._reset_state() _lowerCamelCase : Dict = Accelerator(mixed_precision="""fp16""" , kwargs_handlers=[scaler_handler]) print(accelerator.use_fpaa) _lowerCamelCase : Union[str, Any] = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1_0_2_4.0) self.assertEqual(scaler._growth_factor , 2.0) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5) self.assertEqual(scaler._growth_interval , 2000) self.assertEqual(scaler._enabled , A_) @require_multi_gpu def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[int]: """simple docstring""" _lowerCamelCase : Tuple = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__)] execute_subprocess_async(A_ , env=os.environ.copy()) if __name__ == "__main__": lowerCAmelCase : Optional[Any] =DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) lowerCAmelCase : Optional[Any] =Accelerator(kwargs_handlers=[ddp_scaler]) lowerCAmelCase : List[str] =torch.nn.Linear(100, 200) lowerCAmelCase : Optional[Any] =accelerator.prepare(model) # Check the values changed in kwargs lowerCAmelCase : Dict ="" lowerCAmelCase : Any =model.bucket_bytes_cap // (1024 * 1024) if observed_bucket_cap_map != 15: error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
703
from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class __snake_case ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Dict , _UpperCamelCase : NestedDataStructureLike[PathLike] , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Optional[Features] = None , _UpperCamelCase : str = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : Optional[int] = None , **_UpperCamelCase : Tuple , ) ->Union[str, Any]: """simple docstring""" super().__init__( _UpperCamelCase , split=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase , streaming=_UpperCamelCase , num_proc=_UpperCamelCase , **_UpperCamelCase , ) _lowerCamelCase : List[Any] = path_or_paths if isinstance(_UpperCamelCase , _UpperCamelCase) else {self.split: path_or_paths} _lowerCamelCase : Any = Text( cache_dir=_UpperCamelCase , data_files=_UpperCamelCase , features=_UpperCamelCase , **_UpperCamelCase , ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]: """simple docstring""" if self.streaming: _lowerCamelCase : Tuple = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: _lowerCamelCase : List[Any] = None _lowerCamelCase : Any = None _lowerCamelCase : List[str] = None _lowerCamelCase : Dict = None self.builder.download_and_prepare( download_config=_UpperCamelCase , download_mode=_UpperCamelCase , verification_mode=_UpperCamelCase , base_path=_UpperCamelCase , num_proc=self.num_proc , ) _lowerCamelCase : Optional[int] = self.builder.as_dataset( split=self.split , verification_mode=_UpperCamelCase , in_memory=self.keep_in_memory) return dataset
15
0
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def A__ ( __A ): '''simple docstring''' _lowerCamelCase : int = 384 _lowerCamelCase : Union[str, Any] = 7 if "tiny" in model_name: _lowerCamelCase : Optional[Any] = 96 _lowerCamelCase : Dict = (2, 2, 6, 2) _lowerCamelCase : Dict = (3, 6, 12, 24) elif "small" in model_name: _lowerCamelCase : Union[str, Any] = 96 _lowerCamelCase : Dict = (2, 2, 18, 2) _lowerCamelCase : Dict = (3, 6, 12, 24) elif "base" in model_name: _lowerCamelCase : Any = 128 _lowerCamelCase : Optional[Any] = (2, 2, 18, 2) _lowerCamelCase : str = (4, 8, 16, 32) _lowerCamelCase : List[Any] = 12 _lowerCamelCase : Any = 512 elif "large" in model_name: _lowerCamelCase : List[str] = 192 _lowerCamelCase : List[Any] = (2, 2, 18, 2) _lowerCamelCase : Union[str, Any] = (6, 12, 24, 48) _lowerCamelCase : int = 12 _lowerCamelCase : int = 768 # set label information _lowerCamelCase : List[Any] = 150 _lowerCamelCase : List[str] = 'huggingface/label-files' _lowerCamelCase : str = 'ade20k-id2label.json' _lowerCamelCase : List[Any] = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) ) _lowerCamelCase : Optional[Any] = {int(_lowercase ): v for k, v in idalabel.items()} _lowerCamelCase : str = {v: k for k, v in idalabel.items()} _lowerCamelCase : Optional[Any] = SwinConfig( embed_dim=_lowercase , depths=_lowercase , num_heads=_lowercase , window_size=_lowercase , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , ) _lowerCamelCase : Tuple = UperNetConfig( backbone_config=_lowercase , auxiliary_in_channels=_lowercase , num_labels=_lowercase , idalabel=_lowercase , labelaid=_lowercase , ) return config def A__ ( __A ): '''simple docstring''' _lowerCamelCase : Any = [] # fmt: off # stem rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") ) 
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") ) if i < 3: rename_keys.append((F"""backbone.stages.{i}.downsample.reduction.weight""", F"""backbone.encoder.layers.{i}.downsample.reduction.weight""") ) rename_keys.append((F"""backbone.stages.{i}.downsample.norm.weight""", F"""backbone.encoder.layers.{i}.downsample.norm.weight""") ) rename_keys.append((F"""backbone.stages.{i}.downsample.norm.bias""", F"""backbone.encoder.layers.{i}.downsample.norm.bias""") ) rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") ) rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") ) # decode head rename_keys.extend( [ ("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""), ("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""), ("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""), ("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""), ] ) # fmt: on return rename_keys def A__ ( __A , __A , __A ): '''simple docstring''' _lowerCamelCase : Optional[int] = dct.pop(_lowercase ) _lowerCamelCase : List[str] = val def A__ ( __A , __A ): '''simple docstring''' _lowerCamelCase : Optional[int] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): _lowerCamelCase : Dict = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) _lowerCamelCase : Optional[int] = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""" ) _lowerCamelCase : str = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict _lowerCamelCase : Dict = in_proj_weight[:dim, :] _lowerCamelCase : List[str] = in_proj_bias[: dim] _lowerCamelCase : Dict = in_proj_weight[ dim : dim * 2, : ] _lowerCamelCase : List[Any] = in_proj_bias[ dim : dim * 2 ] _lowerCamelCase : Optional[int] = in_proj_weight[ -dim :, : ] _lowerCamelCase : int = in_proj_bias[-dim :] # fmt: on def A__ ( __A ): '''simple docstring''' _lowerCamelCase : Dict = x.shape _lowerCamelCase : Dict = x.reshape(_lowercase , 4 , in_channel // 4 ) _lowerCamelCase : Optional[Any] = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(_lowercase , _lowercase ) return x def A__ ( __A ): '''simple docstring''' _lowerCamelCase : Optional[int] = x.shape _lowerCamelCase : Optional[Any] = x.reshape(_lowercase , in_channel // 4 , 4 ) _lowerCamelCase : str = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 
).reshape(_lowercase , _lowercase ) return x def A__ ( __A ): '''simple docstring''' _lowerCamelCase : str = x.shape[0] _lowerCamelCase : List[Any] = x.reshape(4 , in_channel // 4 ) _lowerCamelCase : Union[str, Any] = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(_lowercase ) return x def A__ ( __A ): '''simple docstring''' _lowerCamelCase : Dict = x.shape[0] _lowerCamelCase : Any = x.reshape(in_channel // 4 , 4 ) _lowerCamelCase : List[str] = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(_lowercase ) return x def A__ ( __A , __A , __A ): '''simple docstring''' _lowerCamelCase : Dict = { 'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth', 'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth', 'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth', 'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth', } _lowerCamelCase : Dict = model_name_to_url[model_name] _lowerCamelCase : Any = torch.hub.load_state_dict_from_url(_lowercase , map_location="""cpu""" , file_name=_lowercase )[ 'state_dict' ] for name, param in state_dict.items(): print(_lowercase , param.shape ) _lowerCamelCase : str = get_upernet_config(_lowercase ) _lowerCamelCase : List[Any] = UperNetForSemanticSegmentation(_lowercase ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): _lowerCamelCase : Tuple = state_dict.pop(_lowercase ) if "bn" in key: _lowerCamelCase : str = key.replace("""bn""" , """batch_norm""" ) _lowerCamelCase : Union[str, Any] = val # rename keys _lowerCamelCase : str = create_rename_keys(_lowercase ) for src, dest in rename_keys: rename_key(_lowercase , _lowercase , _lowercase ) read_in_q_k_v(_lowercase , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: _lowerCamelCase : Any = reverse_correct_unfold_reduction_order(_lowercase ) if "norm" in key: _lowerCamelCase : Tuple = reverse_correct_unfold_norm_order(_lowercase ) model.load_state_dict(_lowercase ) # verify on image _lowerCamelCase : List[str] = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg' _lowerCamelCase : List[Any] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ).convert("""RGB""" ) _lowerCamelCase : str = SegformerImageProcessor() _lowerCamelCase : Optional[int] = processor(_lowercase , return_tensors="""pt""" ).pixel_values with torch.no_grad(): _lowerCamelCase : str = model(_lowercase ) _lowerCamelCase : Tuple = outputs.logits print(logits.shape ) print("""First values of logits:""" , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": _lowerCamelCase : Dict = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], 
[-7.4_797, -7.4_797, -7.3_068]] ) elif model_name == "upernet-swin-small": _lowerCamelCase : Union[str, Any] = torch.tensor( [[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] ) elif model_name == "upernet-swin-base": _lowerCamelCase : int = torch.tensor( [[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] ) elif model_name == "upernet-swin-large": _lowerCamelCase : Any = torch.tensor( [[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] ) print("""Logits:""" , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , _lowercase , atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_lowercase ) print(F"""Saving processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(_lowercase ) if push_to_hub: print(F"""Pushing model and processor for {model_name} to hub""" ) model.push_to_hub(F"""openmmlab/{model_name}""" ) processor.push_to_hub(F"""openmmlab/{model_name}""" ) if __name__ == "__main__": lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="upernet-swin-tiny", type=str, choices=[F"""upernet-swin-{size}""" for size in ["tiny", "small", "base", "large"]], help="Name of the Swin + UperNet model you\'d like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) lowerCAmelCase : Optional[Any] = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
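The permutation helpers in the conversion script above reorder the grouped-channel layout of Swin's patch-merging weights; the forward/reverse pairs are meant to be mutual inverses. The forward name below (`correct_unfold_reduction_order`) is assumed, since the script's definitions are garbled; `reverse_correct_unfold_reduction_order` matches the call sites. A standalone property check, as a sketch:

import torch


def correct_unfold_reduction_order(x: torch.Tensor) -> torch.Tensor:
    # Forward permutation over the input channels of a patch-merging weight.
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    return x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)


def reverse_correct_unfold_reduction_order(x: torch.Tensor) -> torch.Tensor:
    # Inverse permutation, applied when converting checkpoints back.
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    return x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)


w = torch.randn(96, 384)
assert torch.equal(reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(w)), w)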
704
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """
    Sort a sequence containing only the values in ``colors`` in a single pass.

    >>> dutch_national_flag_sort([2, 0, 1, 0, 2, 1])
    [0, 0, 1, 1, 2, 2]
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[high], sequence[mid] = sequence[mid], sequence[high]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
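A few spot checks, as a sketch: the three-way partition is a single left-to-right pass and sorts in place.

assert dutch_national_flag_sort([2, 0, 1, 0, 2, 1]) == [0, 0, 1, 1, 2, 2]
assert dutch_national_flag_sort([]) == []
assert dutch_national_flag_sort([1, 1, 0]) == [0, 1, 1]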
15
0
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497, "b": 0.01492, "c": 0.02202, "d": 0.04253, "e": 0.11162,
            "f": 0.02228, "g": 0.02015, "h": 0.06094, "i": 0.07546, "j": 0.00153,
            "k": 0.01292, "l": 0.04025, "m": 0.02406, "n": 0.06749, "o": 0.07507,
            "p": 0.01929, "q": 0.00095, "r": 0.07587, "s": 0.06327, "t": 0.09356,
            "u": 0.02758, "v": 0.00978, "w": 0.02560, "x": 0.00150, "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
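Example usage, as a sketch: "crybd cdbsxq" is "short string" Caesar-shifted by 10, so the chi-squared scan should report shift 10 as the minimum.

shift, chi_squared_value, decoded = decrypt_caesar_with_chi_squared("crybd cdbsxq")
print(shift, decoded)  # expected: 10 short string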
705
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    # Check the row, the column, and both upper diagonals for an existing queen.
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
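Because `solve` appends every completed placement to the module-level `solution` list (and prints each board as it goes), other sizes can be counted too; as a sketch:

solution.clear()
n = 4
board = [[0 for _ in range(n)] for _ in range(n)]
solve(board, 0)
print("4-queens solutions:", len(solution))  # the 4x4 puzzle has exactly 2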
15
0
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Perform a depth-first topological sort on a directed acyclic graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
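A quick property check, as a sketch: this variant appends a vertex only after all of its neighbors, so for every edge u -> v, v must precede u in the result.

order = topological_sort("a", [], [])
for u, deps in edges.items():
    for v in deps:
        assert order.index(v) < order.index(u)
print(order)  # e.g. ['c', 'd', 'e', 'b', 'a']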
706
import argparse
import os

import torch

from transformers import (
    XLNetConfig,
    XLNetForQuestionAnswering,
    XLNetForSequenceClassification,
    XLNetLMHeadModel,
    load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}


logging.set_verbosity_info()


def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)
    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
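Equivalent to the CLI entry point above, called directly; the paths are placeholders, and "sst-2" keys into GLUE_TASKS_NUM_LABELS:

convert_xlnet_checkpoint_to_pytorch(
    "xlnet_cased/xlnet_model.ckpt",   # --tf_checkpoint_path
    "xlnet_cased/xlnet_config.json",  # --xlnet_config_file
    "./xlnet-sst2-pytorch",           # --pytorch_dump_folder_path
    "sst-2",                          # --finetuning_task
)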
15
0
import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
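The key-mapping rules are easiest to see on a toy state_dict; a sketch, independent of any real checkpoint:

toy = {
    "roberta.encoder.layer.0.attention.self.query.weight": 1,
    "roberta.encoder.layer.0.attention.self.LayerNorm.weight": 2,  # unused key, dropped
}
converted = {}
for k, v in toy.items():
    if k.startswith("roberta."):
        k = "roberta_prelayernorm." + k[len("roberta.") :]
    if k.endswith(".self.LayerNorm.weight") or k.endswith(".self.LayerNorm.bias"):
        continue
    converted[k] = v
assert list(converted) == ["roberta_prelayernorm.encoder.layer.0.attention.self.query.weight"]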
707
def is_contains_unique_chars(input_str: str) -> bool:
    """
    Return True if every character in ``input_str`` appears only once.

    >>> is_contains_unique_chars("I_love.py")
    True
    >>> is_contains_unique_chars("I don't love Python")
    False
    """
    # Each bit of the bitmap records whether a code point was already seen.
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
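A couple of spot checks, as a sketch, including agreement with the obvious set-based formulation:

assert is_contains_unique_chars("abcdef") is True
assert is_contains_unique_chars("aab") is False
for s in ("", "abc", "hello", "ABCa"):
    assert is_contains_unique_chars(s) == (len(set(s)) == len(s))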
15
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCAmelCase : List[Any] ={ "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"], "tokenization_xlm": ["XLMTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[Any] =[ "XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMForMultipleChoice", "XLMForQuestionAnswering", "XLMForQuestionAnsweringSimple", "XLMForSequenceClassification", "XLMForTokenClassification", "XLMModel", "XLMPreTrainedModel", "XLMWithLMHeadModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Tuple =[ "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLMForMultipleChoice", "TFXLMForQuestionAnsweringSimple", "TFXLMForSequenceClassification", "TFXLMForTokenClassification", "TFXLMMainLayer", "TFXLMModel", "TFXLMPreTrainedModel", "TFXLMWithLMHeadModel", ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys lowerCAmelCase : str =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
708
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
15
0
import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : Any , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int]=13 , _UpperCamelCase : Optional[int]=7 , _UpperCamelCase : str=True , _UpperCamelCase : Tuple=True , _UpperCamelCase : Any=True , _UpperCamelCase : int=True , _UpperCamelCase : Union[str, Any]=99 , _UpperCamelCase : Any=32 , _UpperCamelCase : Dict=5 , _UpperCamelCase : int=4 , _UpperCamelCase : List[str]=37 , _UpperCamelCase : List[str]="gelu" , _UpperCamelCase : Any=0.1 , _UpperCamelCase : Union[str, Any]=0.1 , _UpperCamelCase : List[Any]=512 , _UpperCamelCase : Any=16 , _UpperCamelCase : List[Any]=2 , _UpperCamelCase : List[Any]=0.0_2 , _UpperCamelCase : List[str]=4 , ) ->Dict: """simple docstring""" _lowerCamelCase : Optional[int] = parent _lowerCamelCase : Optional[Any] = batch_size _lowerCamelCase : Any = seq_length _lowerCamelCase : Dict = is_training _lowerCamelCase : Any = use_attention_mask _lowerCamelCase : Tuple = use_token_type_ids _lowerCamelCase : Optional[Any] = use_labels _lowerCamelCase : Tuple = vocab_size _lowerCamelCase : Dict = hidden_size _lowerCamelCase : Union[str, Any] = num_hidden_layers _lowerCamelCase : Optional[Any] = num_attention_heads _lowerCamelCase : Any = intermediate_size _lowerCamelCase : List[Any] = hidden_act _lowerCamelCase : int = hidden_dropout_prob _lowerCamelCase : List[str] = attention_probs_dropout_prob _lowerCamelCase : Optional[int] = max_position_embeddings _lowerCamelCase : Optional[Any] = type_vocab_size _lowerCamelCase : List[Any] = type_sequence_label_size _lowerCamelCase : Dict = initializer_range _lowerCamelCase : str = num_choices def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict: """simple docstring""" _lowerCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _lowerCamelCase : Any = None if self.use_attention_mask: _lowerCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length]) _lowerCamelCase : Dict = None if self.use_token_type_ids: _lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _lowerCamelCase : List[str] = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: """simple docstring""" _lowerCamelCase : Any = self.prepare_config_and_inputs() _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[Any] = config_and_inputs _lowerCamelCase : Union[str, Any] 
= {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class __snake_case ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _snake_case = True _snake_case = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Tuple: """simple docstring""" _lowerCamelCase : Optional[Any] = FlaxRoFormerModelTester(self) @slow def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict: """simple docstring""" for model_class_name in self.all_model_classes: _lowerCamelCase : Optional[int] = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=_lowerCAmelCase) _lowerCamelCase : int = model(np.ones((1, 1))) self.assertIsNotNone(_lowerCAmelCase) @require_flax class __snake_case ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]: """simple docstring""" _lowerCamelCase : Optional[int] = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""") _lowerCamelCase : int = jnp.array([[0, 1, 2, 3, 4, 5]]) _lowerCamelCase : str = model(_lowerCAmelCase)[0] _lowerCamelCase : int = 5_0000 _lowerCamelCase : Union[str, Any] = (1, 6, vocab_size) self.assertEqual(output.shape , _lowerCAmelCase) _lowerCamelCase : Dict = jnp.array( [[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]]) self.assertTrue(jnp.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4))
709
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase : Tuple =logging.get_logger(__name__) class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = ['pixel_values'] def __init__( self : Optional[Any] , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Union[int, float] = 1 / 255 , _UpperCamelCase : bool = True , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , **_UpperCamelCase : str , ) ->None: """simple docstring""" super().__init__(**_UpperCamelCase) _lowerCamelCase : Tuple = size if size is not None else {"""height""": 256, """width""": 256} _lowerCamelCase : Optional[Any] = get_size_dict(_UpperCamelCase) _lowerCamelCase : Any = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} _lowerCamelCase : Any = get_size_dict(_UpperCamelCase , param_name="""crop_size""") _lowerCamelCase : int = do_resize _lowerCamelCase : int = size _lowerCamelCase : Optional[int] = resample _lowerCamelCase : int = do_center_crop _lowerCamelCase : Optional[Any] = crop_size _lowerCamelCase : Union[str, Any] = do_rescale _lowerCamelCase : List[str] = rescale_factor _lowerCamelCase : List[Any] = do_normalize _lowerCamelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray: """simple docstring""" _lowerCamelCase : Dict = get_size_dict(_UpperCamelCase) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""") return resize( _UpperCamelCase , size=(size["""height"""], size["""width"""]) , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : List[str] , ) ->np.ndarray: """simple docstring""" _lowerCamelCase : Union[str, Any] = get_size_dict(_UpperCamelCase) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. 
Got {size.keys()}""") return center_crop(_UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[int, float] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->str: """simple docstring""" return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray: """simple docstring""" return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : ImageInput , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Tuple=None , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : bool = None , _UpperCamelCase : float = None , _UpperCamelCase : bool = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , _UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCamelCase : List[Any] , ) ->PIL.Image.Image: """simple docstring""" _lowerCamelCase : Any = do_resize if do_resize is not None else self.do_resize _lowerCamelCase : List[str] = resample if resample is not None else self.resample _lowerCamelCase : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop _lowerCamelCase : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale _lowerCamelCase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCamelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize _lowerCamelCase : int = image_mean if image_mean is not None else self.image_mean _lowerCamelCase : Dict = image_std if image_std is not None else self.image_std _lowerCamelCase : Optional[Any] = size if size is not None else self.size _lowerCamelCase : Optional[int] = get_size_dict(_UpperCamelCase) _lowerCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size _lowerCamelCase : Dict = get_size_dict(_UpperCamelCase , param_name="""crop_size""") _lowerCamelCase : int = make_list_of_images(_UpperCamelCase) if not valid_images(_UpperCamelCase): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""") if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""") if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""") if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""") if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""") # All transformations expect numpy arrays. 
_lowerCamelCase : Union[str, Any] = [to_numpy_array(_UpperCamelCase) for image in images] if do_resize: _lowerCamelCase : Any = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase) for image in images] if do_center_crop: _lowerCamelCase : str = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase) for image in images] if do_rescale: _lowerCamelCase : Optional[int] = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase) for image in images] if do_normalize: _lowerCamelCase : List[str] = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase) for image in images] _lowerCamelCase : List[str] = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase) for image in images] _lowerCamelCase : int = {"""pixel_values""": images} return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase)
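As a usage sketch for the image processor above (its concrete class name is garbled in this dump, so `MyImageProcessor` below is a hypothetical stand-in): with the defaults, an image is resized to 256x256, center-cropped to 224x224, rescaled by 1/255, and normalized, yielding a (1, 3, 224, 224) batch.

import numpy as np
from PIL import Image

# Hypothetical name for the processor class defined above.
processor = MyImageProcessor()
image = Image.fromarray(np.uint8(np.random.rand(300, 400, 3) * 255))
batch = processor(image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)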
15
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
710
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for a single input sample x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the magnitude response of a filter by feeding it a unit impulse."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the phase response of a filter by feeding it a unit impulse."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
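A minimal FilterType implementation, as a sketch: an identity filter returns the impulse unchanged, so its FFT magnitude is 1 at every bin and the plot should be a flat line at 0 dB.

class IdentityFilter:
    """Minimal FilterType: passes samples through unchanged (flat 0 dB response)."""

    def process(self, sample: float) -> float:
        return sample


show_frequency_response(IdentityFilter(), samplerate=48000)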
15
0
import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _snake_case = StableUnCLIPPipeline _snake_case = TEXT_TO_IMAGE_PARAMS _snake_case = TEXT_TO_IMAGE_BATCH_PARAMS _snake_case = TEXT_TO_IMAGE_IMAGE_PARAMS _snake_case = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false _snake_case = False def _SCREAMING_SNAKE_CASE ( self : str) ->int: """simple docstring""" _lowerCamelCase : List[str] = 32 _lowerCamelCase : Dict = embedder_hidden_size # prior components torch.manual_seed(0) _lowerCamelCase : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""") torch.manual_seed(0) _lowerCamelCase : List[str] = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__A , projection_dim=__A , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )) torch.manual_seed(0) _lowerCamelCase : int = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__A , num_layers=1 , ) torch.manual_seed(0) _lowerCamelCase : List[Any] = DDPMScheduler( variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=__A , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , ) # regular denoising components torch.manual_seed(0) _lowerCamelCase : Union[str, Any] = StableUnCLIPImageNormalizer(embedding_dim=__A) _lowerCamelCase : str = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""") torch.manual_seed(0) _lowerCamelCase : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""") torch.manual_seed(0) _lowerCamelCase : Any = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__A , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )) torch.manual_seed(0) _lowerCamelCase : List[Any] = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__A , layers_per_block=1 , upcast_attention=__A , use_linear_projection=__A , ) torch.manual_seed(0) _lowerCamelCase : List[Any] = DDIMScheduler( beta_schedule="""scaled_linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type="""v_prediction""" , set_alpha_to_one=__A , steps_offset=1 , ) torch.manual_seed(0) 
_lowerCamelCase : Union[str, Any] = AutoencoderKL() _lowerCamelCase : str = { # prior components "prior_tokenizer": prior_tokenizer, "prior_text_encoder": prior_text_encoder, "prior": prior, "prior_scheduler": prior_scheduler, # image noising components "image_normalizer": image_normalizer, "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder, "unet": unet, "scheduler": scheduler, "vae": vae, } return components def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : Any , _UpperCamelCase : List[str]=0) ->Tuple: """simple docstring""" if str(__A).startswith("""mps"""): _lowerCamelCase : List[str] = torch.manual_seed(__A) else: _lowerCamelCase : List[Any] = torch.Generator(device=__A).manual_seed(__A) _lowerCamelCase : List[str] = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "prior_num_inference_steps": 2, "output_type": "numpy", } return inputs def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" _lowerCamelCase : int = torch_device == "cpu" self._test_attention_slicing_forward_pass(test_max_difference=__A) def _SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]: """simple docstring""" _lowerCamelCase : int = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=__A) @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : str) ->Tuple: """simple docstring""" _lowerCamelCase : Union[str, Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""") _lowerCamelCase : List[Any] = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa) pipe.to(__A) pipe.set_progress_bar_config(disable=__A) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _lowerCamelCase : int = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Tuple = pipe("""anime turle""" , generator=__A , output_type="""np""") _lowerCamelCase : Optional[Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__A , __A) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _lowerCamelCase : Dict = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa) _lowerCamelCase : Optional[Any] = pipe.to(__A) pipe.set_progress_bar_config(disable=__A) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _lowerCamelCase : List[str] = pipe( """anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : Optional[int] = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
711
import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL lowerCAmelCase : Tuple =version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11") def A__ ( __A , __A , __A , __A , __A , __A , __A , __A=False , ): '''simple docstring''' output_path.parent.mkdir(parents=__A , exist_ok=__A ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( __A , __A , f=output_path.as_posix() , input_names=__A , output_names=__A , dynamic_axes=__A , do_constant_folding=__A , use_external_data_format=__A , enable_onnx_checker=__A , opset_version=__A , ) else: export( __A , __A , f=output_path.as_posix() , input_names=__A , output_names=__A , dynamic_axes=__A , do_constant_folding=__A , opset_version=__A , ) @torch.no_grad() def A__ ( __A , __A , __A , __A = False ): '''simple docstring''' _lowerCamelCase : Tuple = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): _lowerCamelCase : str = """cuda""" elif fpaa and not torch.cuda.is_available(): raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" ) else: _lowerCamelCase : List[str] = """cpu""" _lowerCamelCase : Dict = Path(__A ) # VAE DECODER _lowerCamelCase : Optional[Any] = AutoencoderKL.from_pretrained(model_path + """/vae""" ) _lowerCamelCase : List[str] = vae_decoder.config.latent_channels # forward only through the decoder part _lowerCamelCase : Tuple = vae_decoder.decode onnx_export( __A , model_args=( torch.randn(1 , __A , 25 , 25 ).to(device=__A , dtype=__A ), False, ) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={ """latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""}, } , opset=__A , ) del vae_decoder if __name__ == "__main__": lowerCAmelCase : Optional[int] =argparse.ArgumentParser() parser.add_argument( "--model_path", type=str, required=True, help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", ) parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") parser.add_argument( "--opset", default=14, type=int, help="The version of the ONNX operator set to use.", ) parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") lowerCAmelCase : Optional[Any] =parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print("SD: Done: ONNX")
15
0
import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class __snake_case : '''simple docstring''' def __init__( self : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str]=99 , _UpperCamelCase : Tuple=13 , _UpperCamelCase : Any=7 , _UpperCamelCase : str=9 , _UpperCamelCase : int=True , _UpperCamelCase : Any=True , _UpperCamelCase : Optional[Any]=False , _UpperCamelCase : Tuple=32 , _UpperCamelCase : Union[str, Any]=5 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : Tuple=37 , _UpperCamelCase : int=8 , _UpperCamelCase : Union[str, Any]=0.1 , _UpperCamelCase : List[str]=0.0_0_2 , _UpperCamelCase : Any=1 , _UpperCamelCase : Union[str, Any]=0 , _UpperCamelCase : List[Any]=0 , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : Any=None , ) ->Any: """simple docstring""" _lowerCamelCase : Dict = parent _lowerCamelCase : Union[str, Any] = batch_size _lowerCamelCase : Tuple = encoder_seq_length _lowerCamelCase : str = decoder_seq_length # For common tests _lowerCamelCase : Optional[int] = self.decoder_seq_length _lowerCamelCase : Tuple = is_training _lowerCamelCase : Dict = use_attention_mask _lowerCamelCase : List[str] = use_labels _lowerCamelCase : str = vocab_size _lowerCamelCase : Union[str, Any] = hidden_size _lowerCamelCase : Union[str, Any] = num_hidden_layers _lowerCamelCase : Any = num_attention_heads _lowerCamelCase : Any = d_ff _lowerCamelCase : Any = relative_attention_num_buckets _lowerCamelCase : Union[str, Any] = dropout_rate _lowerCamelCase : List[str] = initializer_factor _lowerCamelCase : List[Any] = eos_token_id _lowerCamelCase : List[str] = pad_token_id _lowerCamelCase : Any = decoder_start_token_id _lowerCamelCase : Any = None _lowerCamelCase : str = decoder_layers def _SCREAMING_SNAKE_CASE ( self : Dict) ->Tuple: """simple docstring""" return TaConfig.from_pretrained("""google/umt5-base""") def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : str=None , _UpperCamelCase : int=None , _UpperCamelCase : Any=None , _UpperCamelCase : int=None , _UpperCamelCase : Union[str, Any]=None , ) ->Any: """simple docstring""" if attention_mask is None: _lowerCamelCase : List[str] = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: _lowerCamelCase : int = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: _lowerCamelCase : str = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=_a) if decoder_head_mask is None: _lowerCamelCase : List[str] = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=_a) if cross_attn_head_mask is None: _lowerCamelCase : Union[str, Any] = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=_a) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def 
_SCREAMING_SNAKE_CASE ( self : Any) ->Tuple: """simple docstring""" _lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size) _lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input _lowerCamelCase : Tuple = input_ids.clamp(self.pad_token_id + 1) _lowerCamelCase : Optional[int] = decoder_input_ids.clamp(self.pad_token_id + 1) _lowerCamelCase : Union[str, Any] = self.get_config() _lowerCamelCase : List[str] = config.num_attention_heads _lowerCamelCase : Optional[int] = self.prepare_inputs_dict(_a , _a , _a) return config, input_dict def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Any: """simple docstring""" _lowerCamelCase : int = self.prepare_config_and_inputs() return config, inputs_dict def _SCREAMING_SNAKE_CASE ( self : int) ->List[str]: """simple docstring""" return TaConfig( vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]: """simple docstring""" return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : int , _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any] , ) ->Optional[Any]: """simple docstring""" _lowerCamelCase : str = UMTaModel(config=_a) model.to(_a) model.eval() _lowerCamelCase : Dict = model( input_ids=_a , decoder_input_ids=_a , attention_mask=_a , decoder_attention_mask=_a , ) _lowerCamelCase : Optional[Any] = model(input_ids=_a , decoder_input_ids=_a) _lowerCamelCase : Optional[Any] = result.last_hidden_state _lowerCamelCase : Dict = result.past_key_values _lowerCamelCase : Any = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size)) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(_a) 
, config.num_layers) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0]) , 4) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : int , _UpperCamelCase : List[str] , _UpperCamelCase : Tuple , _UpperCamelCase : Dict , _UpperCamelCase : Dict , ) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : int = UMTaModel(config=_a).get_decoder().to(_a).eval() # first forward pass _lowerCamelCase : str = model(_a , use_cache=_a) _lowerCamelCase : str = model(_a) _lowerCamelCase : Union[str, Any] = model(_a , use_cache=_a) self.parent.assertTrue(len(_a) == len(_a)) self.parent.assertTrue(len(_a) == len(_a) + 1) _lowerCamelCase : Dict = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _lowerCamelCase : List[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size) # append to next input_ids and _lowerCamelCase : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1) _lowerCamelCase : Union[str, Any] = model(_a)["""last_hidden_state"""] _lowerCamelCase : Tuple = model(_a , past_key_values=_a)["""last_hidden_state"""] # select random slice _lowerCamelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1]).item() _lowerCamelCase : Optional[Any] = output_from_no_past[:, -1, random_slice_idx].detach() _lowerCamelCase : List[Any] = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_a , _a , atol=1E-3)) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : Dict , ) ->List[Any]: """simple docstring""" _lowerCamelCase : int = UMTaModel(config=_a).to(_a).half().eval() _lowerCamelCase : Union[str, Any] = model(**_a)["""last_hidden_state"""] self.parent.assertFalse(torch.isnan(_a).any().item()) @require_torch class __snake_case ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): '''simple docstring''' _snake_case = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) _snake_case = (UMTaForConditionalGeneration,) if is_torch_available() else () _snake_case = ( { """conversational""": UMTaForConditionalGeneration, """feature-extraction""": UMTaModel, """summarization""": UMTaForConditionalGeneration, """text2text-generation""": UMTaForConditionalGeneration, """translation""": UMTaForConditionalGeneration, """question-answering""": UMTaForQuestionAnswering, } if is_torch_available() else {} ) _snake_case = True _snake_case = False _snake_case = False _snake_case = True _snake_case = True # The small UMT5 model needs higher percentages for CPU/MP tests _snake_case = [0.8, 0.9] def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[int]: """simple docstring""" _lowerCamelCase : Union[str, Any] = UMTaModelTester(self) @unittest.skip("""Test has a segmentation fault on torch 1.8.0""") def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any: """simple docstring""" _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : Dict = UMTaModel(config_and_inputs[0]).to(_a) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( _a , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"""{tmpdirname}/t5_test.onnx""" , export_params=_a , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , ) 
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""") def _SCREAMING_SNAKE_CASE ( self : Tuple) ->str: """simple docstring""" _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*_a) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]: """simple docstring""" _lowerCamelCase : int = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""] _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : List[Any] = config_and_inputs[0] _lowerCamelCase : Tuple = UMTaForConditionalGeneration(_a).eval() model.to(_a) _lowerCamelCase : List[str] = { """head_mask""": torch.zeros(config.num_layers , config.num_heads , device=_a), """decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=_a), """cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=_a), } for attn_name, (name, mask) in zip(_a , head_masking.items()): _lowerCamelCase : List[str] = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": _lowerCamelCase : str = torch.ones( config.num_decoder_layers , config.num_heads , device=_a) _lowerCamelCase : Optional[Any] = model.generate( config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=_a , return_dict_in_generate=_a , **_a , ) # We check the state of decoder_attentions and cross_attentions just from the last step _lowerCamelCase : List[str] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights]) , 0.0) @unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""") def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict: """simple docstring""" pass @require_torch @require_sentencepiece @require_tokenizers class __snake_case ( unittest.TestCase ): '''simple docstring''' @slow @unittest.skip( """Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. 
Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""") def _SCREAMING_SNAKE_CASE ( self : int) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : Optional[Any] = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=_a).to(_a) _lowerCamelCase : str = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=_a , legacy=_a) _lowerCamelCase : Optional[Any] = [ """Bonjour monsieur <extra_id_0> bien <extra_id_1>.""", """No se como puedo <extra_id_0>.""", """This is the reason why we <extra_id_0> them.""", """The <extra_id_0> walks in <extra_id_1>, seats""", """A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""", ] _lowerCamelCase : Tuple = tokenizer(_a , return_tensors="""pt""" , padding=_a).input_ids # fmt: off _lowerCamelCase : Union[str, Any] = torch.tensor( [ [ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1], ]) # fmt: on torch.testing.assert_allclose(_a , _a) _lowerCamelCase : Optional[int] = model.generate(input_ids.to(_a)) _lowerCamelCase : int = [ """<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""", """<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""", """<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""", """<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""", """<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""", ] _lowerCamelCase : List[str] = tokenizer.batch_decode(_a) self.assertEqual(_a , _a)
712
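The decode-with-past check above reduces to one invariant: a cached forward over just the newest token must equal the matching slice of an uncached forward over the full sequence. A toy, model-free illustration of the assertion pattern (tensor names and shapes are stand-in assumptions):

import torch

output_from_no_past = torch.randn(2, 8, 16)        # full-sequence forward, [batch, seq, hidden]
output_from_past = output_from_no_past[:, -1:, :]  # a cached forward emits only the new step
random_slice_idx = torch.randint(0, 16, (1,)).item()
assert torch.allclose(
    output_from_no_past[:, -1, random_slice_idx],
    output_from_past[:, 0, random_slice_idx],
    atol=1e-3,
)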
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """Built-in potential of a p-n junction, in volts."""
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration"
        )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration"
        )
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
15
0
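The expression implemented above is the standard built-in potential of a p-n junction; dividing the joule-valued k_B T ln(...) by the electron-volt constant performs the 1/q conversion, so the result comes out in volts:

V_{bi} = \frac{k_B T}{q} \ln\!\left(\frac{N_D N_A}{n_i^{2}}\right)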
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: maximum sum over all (optionally empty) contiguous subarrays."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or restart it at the current element.
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
713
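On the demo array the maximum subarray is [4, -1, 2, 1] with sum 6, and allowing empty subarrays floors the result at 0; two quick checks against the function above:

assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6
assert max_subarray_sum([-3, -2], allow_empty_subarrays=True) == 0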
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def A__ ( __A ): '''simple docstring''' _lowerCamelCase : Tuple = {} _lowerCamelCase : List[Any] = tokenizer(example["""content"""] , truncation=__A )["""input_ids"""] _lowerCamelCase : Tuple = len(example["""content"""] ) / len(output["""input_ids"""] ) return output lowerCAmelCase : int =HfArgumentParser(PretokenizationArguments) lowerCAmelCase : int =parser.parse_args() if args.num_workers is None: lowerCAmelCase : Any =multiprocessing.cpu_count() lowerCAmelCase : Optional[Any] =AutoTokenizer.from_pretrained(args.tokenizer_dir) lowerCAmelCase : str =time.time() lowerCAmelCase : Union[str, Any] =load_dataset(args.dataset_name, split="train") print(F"""Dataset loaded in {time.time()-t_start:.2f}s""") lowerCAmelCase : Dict =time.time() lowerCAmelCase : Dict =ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ "repo_name", "path", "copies", "size", "content", "license", "hash", "line_mean", "line_max", "alpha_frac", "autogenerated", ], ) print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""") lowerCAmelCase : Tuple =time.time() ds.push_to_hub(args.tokenized_data_repo) print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
15
0
import argparse from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt if __name__ == "__main__": lowerCAmelCase : Dict =argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." ) parser.add_argument( "--original_config_file", type=str, required=True, help="The YAML config file corresponding to the original architecture.", ) parser.add_argument( "--num_in_channels", default=None, type=int, help="The number of input channels. If `None` number of input channels will be automatically inferred.", ) parser.add_argument( "--image_size", default=512, type=int, help=( "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2" " Base. Use 768 for Stable Diffusion v2." ), ) parser.add_argument( "--extract_ema", action="store_true", help=( "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights" " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield" " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning." ), ) parser.add_argument( "--upcast_attention", action="store_true", help=( "Whether the attention computation should always be upcasted. This is necessary when running stable" " diffusion 2.1." ), ) parser.add_argument( "--from_safetensors", action="store_true", help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.", ) parser.add_argument( "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)") def A__ ( __A ): '''simple docstring''' if string == "True": return True elif string == "False": return False else: raise ValueError(F"""could not parse string as bool {string}""" ) parser.add_argument( "--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool ) parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int) lowerCAmelCase : Union[str, Any] =parser.parse_args() lowerCAmelCase : Dict =download_controlnet_from_original_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, extract_ema=args.extract_ema, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, use_linear_projection=args.use_linear_projection, cross_attention_dim=args.cross_attention_dim, ) controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
714
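The hand-rolled parse_bool above exists because argparse's type=bool treats every non-empty string as truthy. On Python 3.9+ one idiomatic alternative, sketched here, is argparse.BooleanOptionalAction, which generates paired positive/negative flags:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--use_linear_projection", action=argparse.BooleanOptionalAction, default=None)
args = parser.parse_args(["--no-use_linear_projection"])
assert args.use_linear_projection is False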
import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _snake_case = IFPipeline _snake_case = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'} _snake_case = TEXT_TO_IMAGE_BATCH_PARAMS _snake_case = PipelineTesterMixin.required_optional_params - {'latents'} def _SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]: """simple docstring""" return self._get_dummy_components() def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=0) ->Optional[Any]: """simple docstring""" if str(_UpperCamelCase).startswith("""mps"""): _lowerCamelCase : int = torch.manual_seed(_UpperCamelCase) else: _lowerCamelCase : List[Any] = torch.Generator(device=_UpperCamelCase).manual_seed(_UpperCamelCase) _lowerCamelCase : Dict = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""") def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1) def _SCREAMING_SNAKE_CASE ( self : int) ->Any: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2) def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]: """simple docstring""" self._test_save_load_local() def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3) @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]: """simple docstring""" _lowerCamelCase : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa) _lowerCamelCase : Tuple = IFSuperResolutionPipeline.from_pretrained( """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("""cuda""") _lowerCamelCase , _lowerCamelCase : str = 
pipe_a.encode_prompt("""anime turtle""" , device="""cuda""") del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() _lowerCamelCase : str = None _lowerCamelCase : str = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img _lowerCamelCase : Optional[Any] = IFImgaImgPipeline(**pipe_a.components) _lowerCamelCase : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if_imgaimg(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting _lowerCamelCase : Any = IFInpaintingPipeline(**pipe_a.components) _lowerCamelCase : Dict = IFInpaintingSuperResolutionPipeline(**pipe_a.components) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if_inpainting(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str) ->Tuple: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : Optional[int] = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Optional[Any] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Optional[int] = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 _lowerCamelCase : Dict = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : str = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : Any = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : str , _UpperCamelCase : List[Any]) ->Any: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Union[str, Any] = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Dict 
= pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Union[str, Any] = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _lowerCamelCase : List[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : List[str] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Optional[Any] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : List[Any] = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : str = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple) ->Optional[int]: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(1)).to(_UpperCamelCase) _lowerCamelCase : int = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Any = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Any = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _lowerCamelCase : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1)).to(_UpperCamelCase) _lowerCamelCase : List[str] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : Optional[Any] = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : Optional[Any] = 
torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def A__ ( ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
15
0
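The _start_torch_memory_measurement helper defined at the end of this file just resets the CUDA allocator statistics; the general pattern these tests use to bound a pipeline's peak memory looks like this (the byte budget is per-test, not a general rule):

import torch

if torch.cuda.is_available():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
    # ... run the pipeline under test ...
    mem_bytes = torch.cuda.max_memory_allocated()
    assert mem_bytes < 13 * 10**9  # fail if the peak exceeds the budget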
import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __snake_case ( a__ , unittest.TestCase ): '''simple docstring''' _snake_case = DiTPipeline _snake_case = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS _snake_case = PipelineTesterMixin.required_optional_params - { 'latents', 'num_images_per_prompt', 'callback', 'callback_steps', } _snake_case = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS _snake_case = False def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->str: """simple docstring""" torch.manual_seed(0) _lowerCamelCase : List[str] = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_A , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=_A , ) _lowerCamelCase : Union[str, Any] = AutoencoderKL() _lowerCamelCase : Union[str, Any] = DDIMScheduler() _lowerCamelCase : Tuple = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler} return components def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any]=0) ->List[Any]: """simple docstring""" if str(_A).startswith("""mps"""): _lowerCamelCase : Dict = torch.manual_seed(_A) else: _lowerCamelCase : int = torch.Generator(device=_A).manual_seed(_A) _lowerCamelCase : Tuple = { 'class_labels': [1], 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: """simple docstring""" _lowerCamelCase : List[str] = 'cpu' _lowerCamelCase : Optional[int] = self.get_dummy_components() _lowerCamelCase : Optional[Any] = self.pipeline_class(**_A) pipe.to(_A) pipe.set_progress_bar_config(disable=_A) _lowerCamelCase : Tuple = self.get_dummy_inputs(_A) _lowerCamelCase : Dict = pipe(**_A).images _lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3)) _lowerCamelCase : List[str] = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7]) _lowerCamelCase : int = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(_A , 1E-3) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]: """simple docstring""" self._test_inference_batch_single_identical(relax_max_difference=_A , expected_max_diff=1E-3) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def _SCREAMING_SNAKE_CASE ( self : Dict) ->Any: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3) @require_torch_gpu @slow class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Any: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Tuple: """simple docstring""" 
_lowerCamelCase : Tuple = torch.manual_seed(0) _lowerCamelCase : Any = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""") pipe.to("""cuda""") _lowerCamelCase : List[Any] = ['vase', 'umbrella', 'white shark', 'white wolf'] _lowerCamelCase : str = pipe.get_label_ids(_A) _lowerCamelCase : Dict = pipe(_A , generator=_A , num_inference_steps=40 , output_type="""np""").images for word, image in zip(_A , _A): _lowerCamelCase : Any = load_numpy( F"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""") assert np.abs((expected_image - image).max()) < 1E-2 def _SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Optional[int] = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""") _lowerCamelCase : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.to("""cuda""") _lowerCamelCase : str = ['vase', 'umbrella'] _lowerCamelCase : Any = pipe.get_label_ids(_A) _lowerCamelCase : Optional[Any] = torch.manual_seed(0) _lowerCamelCase : List[str] = pipe(_A , generator=_A , num_inference_steps=25 , output_type="""np""").images for word, image in zip(_A , _A): _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" F"""/dit/{word}_512.npy""") assert np.abs((expected_image - image).max()) < 1E-1
715
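As the slow tests show, DiTPipeline is class-conditional: the "prompts" are ImageNet class names that get_label_ids maps to integer labels before sampling. A condensed sketch of that flow, with the checkpoint id and call shape taken from the test above:

import torch
from diffusers import DiTPipeline

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
class_ids = pipe.get_label_ids(["vase", "umbrella"])  # class names -> ImageNet label ids
images = pipe(class_ids, generator=torch.manual_seed(0), num_inference_steps=25, output_type="np").images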
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase : Any =logging.get_logger(__name__) lowerCAmelCase : List[Any] ={ "microsoft/swin-tiny-patch4-window7-224": ( "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json" ), # See all Swin models at https://huggingface.co/models?filter=swin } class __snake_case ( __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' _snake_case = 'swin' _snake_case = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : Optional[int] , _UpperCamelCase : List[str]=224 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : List[Any]=3 , _UpperCamelCase : Dict=96 , _UpperCamelCase : Any=[2, 2, 6, 2] , _UpperCamelCase : Any=[3, 6, 12, 24] , _UpperCamelCase : Tuple=7 , _UpperCamelCase : Tuple=4.0 , _UpperCamelCase : Dict=True , _UpperCamelCase : Tuple=0.0 , _UpperCamelCase : Any=0.0 , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : Any="gelu" , _UpperCamelCase : str=False , _UpperCamelCase : str=0.0_2 , _UpperCamelCase : Dict=1E-5 , _UpperCamelCase : List[str]=32 , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : List[Any]=None , **_UpperCamelCase : List[Any] , ) ->Tuple: """simple docstring""" super().__init__(**_UpperCamelCase) _lowerCamelCase : List[str] = image_size _lowerCamelCase : Tuple = patch_size _lowerCamelCase : Dict = num_channels _lowerCamelCase : Union[str, Any] = embed_dim _lowerCamelCase : str = depths _lowerCamelCase : str = len(_UpperCamelCase) _lowerCamelCase : Optional[Any] = num_heads _lowerCamelCase : Tuple = window_size _lowerCamelCase : int = mlp_ratio _lowerCamelCase : Optional[int] = qkv_bias _lowerCamelCase : List[str] = hidden_dropout_prob _lowerCamelCase : str = attention_probs_dropout_prob _lowerCamelCase : Tuple = drop_path_rate _lowerCamelCase : List[str] = hidden_act _lowerCamelCase : Dict = use_absolute_embeddings _lowerCamelCase : int = layer_norm_eps _lowerCamelCase : str = initializer_range _lowerCamelCase : Dict = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowerCamelCase : int = int(embed_dim * 2 ** (len(_UpperCamelCase) - 1)) _lowerCamelCase : Dict = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(_UpperCamelCase) + 1)] _lowerCamelCase , _lowerCamelCase : List[str] = get_aligned_output_features_output_indices( out_features=_UpperCamelCase , out_indices=_UpperCamelCase , stage_names=self.stage_names) class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = version.parse('1.11' ) @property def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ]) @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->float: """simple docstring""" return 1E-4
15
0
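The hidden_size attribute set at the end of __init__ follows the stage-doubling rule: every Swin stage doubles the channel width, so the final dimension is embed_dim * 2**(num_stages - 1). With the tiny defaults above:

embed_dim, depths = 96, [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
assert hidden_size == 768  # swin-tiny's final channel dimension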
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    """Denominator of the product of all digit-cancelling fractions, in lowest terms."""
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
716
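This is Project Euler problem 33: the four non-trivial digit-cancelling fractions are 16/64, 19/95, 26/65 and 49/98, their product reduces to 1/100, hence solution() returns 100. A quick check of the predicate defined above:

assert is_digit_cancelling(49, 98)      # 49/98 == 4/8 after "cancelling" the nines
assert not is_digit_cancelling(12, 21)  # 1/1 != 12/21, so it is rejected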
import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = (EulerDiscreteScheduler,) _snake_case = 10 def _SCREAMING_SNAKE_CASE ( self : Tuple , **_UpperCamelCase : Optional[Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Optional[int] = { """num_train_timesteps""": 1100, """beta_start""": 0.0_0_0_1, """beta_end""": 0.0_2, """beta_schedule""": """linear""", } config.update(**_UpperCamelCase) return config def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict: """simple docstring""" for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]): self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : List[Any] = self.scheduler_classes[0] _lowerCamelCase : str = self.get_scheduler_config() _lowerCamelCase : Any = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps) _lowerCamelCase : str = torch.manual_seed(0) _lowerCamelCase : str = self.dummy_model() _lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma _lowerCamelCase : int = sample.to(_UpperCamelCase) for i, t in enumerate(scheduler.timesteps): _lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : str = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : Dict = output.prev_sample _lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : Any = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2 assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any: """simple docstring""" _lowerCamelCase : int = self.scheduler_classes[0] _lowerCamelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""") _lowerCamelCase : int = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps) _lowerCamelCase : Any = torch.manual_seed(0) _lowerCamelCase : int = self.dummy_model() _lowerCamelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma _lowerCamelCase : Dict = sample.to(_UpperCamelCase) for i, t in enumerate(scheduler.timesteps): _lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : str = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[Any] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : Tuple = output.prev_sample _lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase 
: Optional[int] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 0.0_0_0_2) < 1E-2 assert abs(result_mean.item() - 2.2_6_7_6E-0_6) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.scheduler_classes[0] _lowerCamelCase : int = self.get_scheduler_config() _lowerCamelCase : List[Any] = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase) _lowerCamelCase : Optional[Any] = torch.manual_seed(0) _lowerCamelCase : Tuple = self.dummy_model() _lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _lowerCamelCase : Tuple = sample.to(_UpperCamelCase) for t in scheduler.timesteps: _lowerCamelCase : List[Any] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Any = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : List[Any] = output.prev_sample _lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : List[Any] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2 assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple: """simple docstring""" _lowerCamelCase : List[str] = self.scheduler_classes[0] _lowerCamelCase : Optional[int] = self.get_scheduler_config() _lowerCamelCase : int = scheduler_class(**_UpperCamelCase , use_karras_sigmas=_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase) _lowerCamelCase : int = torch.manual_seed(0) _lowerCamelCase : Tuple = self.dummy_model() _lowerCamelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _lowerCamelCase : Optional[int] = sample.to(_UpperCamelCase) for t in scheduler.timesteps: _lowerCamelCase : Tuple = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Any = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : int = output.prev_sample _lowerCamelCase : Tuple = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : List[str] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9) < 1E-2 assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3) < 1E-3
15
0
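All four sampling tests above follow the same canonical diffusers loop. A compact, model-free sketch (the zero "model output" is a stand-in so the loop runs end to end; a real UNet prediction belongs there):

import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    model_output = torch.zeros_like(model_input)  # stand-in for the denoiser
    sample = scheduler.step(model_output, t, sample, generator=torch.manual_seed(0)).prev_sample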
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def A__ ( __A ): '''simple docstring''' _lowerCamelCase : Tuple = SwinvaConfig() _lowerCamelCase : Tuple = swinva_name.split("""_""" ) _lowerCamelCase : str = name_split[1] if "to" in name_split[3]: _lowerCamelCase : Optional[Any] = int(name_split[3][-3:] ) else: _lowerCamelCase : int = int(name_split[3] ) if "to" in name_split[2]: _lowerCamelCase : Dict = int(name_split[2][-2:] ) else: _lowerCamelCase : Any = int(name_split[2][6:] ) if model_size == "tiny": _lowerCamelCase : Optional[Any] = 96 _lowerCamelCase : str = (2, 2, 6, 2) _lowerCamelCase : Any = (3, 6, 12, 24) elif model_size == "small": _lowerCamelCase : List[str] = 96 _lowerCamelCase : List[Any] = (2, 2, 18, 2) _lowerCamelCase : List[Any] = (3, 6, 12, 24) elif model_size == "base": _lowerCamelCase : List[str] = 128 _lowerCamelCase : int = (2, 2, 18, 2) _lowerCamelCase : Optional[Any] = (4, 8, 16, 32) else: _lowerCamelCase : List[Any] = 192 _lowerCamelCase : Optional[Any] = (2, 2, 18, 2) _lowerCamelCase : Dict = (6, 12, 24, 48) if "to" in swinva_name: _lowerCamelCase : Dict = (12, 12, 12, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): _lowerCamelCase : List[Any] = 21_841 _lowerCamelCase : List[Any] = '''huggingface/label-files''' _lowerCamelCase : Union[str, Any] = '''imagenet-22k-id2label.json''' _lowerCamelCase : Dict = json.load(open(hf_hub_download(a_ , a_ , repo_type="""dataset""" ) , """r""" ) ) _lowerCamelCase : Optional[int] = {int(a_ ): v for k, v in idalabel.items()} _lowerCamelCase : List[str] = idalabel _lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} else: _lowerCamelCase : Any = 1_000 _lowerCamelCase : Dict = '''huggingface/label-files''' _lowerCamelCase : int = '''imagenet-1k-id2label.json''' _lowerCamelCase : List[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type="""dataset""" ) , """r""" ) ) _lowerCamelCase : Tuple = {int(a_ ): v for k, v in idalabel.items()} _lowerCamelCase : Union[str, Any] = idalabel _lowerCamelCase : Tuple = {v: k for k, v in idalabel.items()} _lowerCamelCase : Optional[int] = img_size _lowerCamelCase : List[Any] = num_classes _lowerCamelCase : Dict = embed_dim _lowerCamelCase : Optional[int] = depths _lowerCamelCase : Optional[int] = num_heads _lowerCamelCase : Union[str, Any] = window_size return config def A__ ( __A ): '''simple docstring''' if "patch_embed.proj" in name: _lowerCamelCase : List[str] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: _lowerCamelCase : str = name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if "layers" in name: _lowerCamelCase : List[str] = '''encoder.''' + name if "attn.proj" in name: _lowerCamelCase : Union[str, Any] = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: _lowerCamelCase : Optional[Any] = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: _lowerCamelCase : int = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: _lowerCamelCase : List[Any] = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: _lowerCamelCase : List[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: _lowerCamelCase : List[str] = name.replace("""mlp.fc2""" , """output.dense""" ) if "q_bias" 
in name: _lowerCamelCase : int = name.replace("""q_bias""" , """query.bias""" ) if "k_bias" in name: _lowerCamelCase : Dict = name.replace("""k_bias""" , """key.bias""" ) if "v_bias" in name: _lowerCamelCase : str = name.replace("""v_bias""" , """value.bias""" ) if "cpb_mlp" in name: _lowerCamelCase : Any = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" ) if name == "norm.weight": _lowerCamelCase : List[Any] = '''layernorm.weight''' if name == "norm.bias": _lowerCamelCase : Tuple = '''layernorm.bias''' if "head" in name: _lowerCamelCase : Any = name.replace("""head""" , """classifier""" ) else: _lowerCamelCase : Tuple = '''swinv2.''' + name return name def A__ ( __A , __A ): '''simple docstring''' for key in orig_state_dict.copy().keys(): _lowerCamelCase : Union[str, Any] = orig_state_dict.pop(a_ ) if "mask" in key: continue elif "qkv" in key: _lowerCamelCase : str = key.split(""".""" ) _lowerCamelCase : Optional[int] = int(key_split[1] ) _lowerCamelCase : str = int(key_split[3] ) _lowerCamelCase : Optional[Any] = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: _lowerCamelCase : List[Any] = val[:dim, :] _lowerCamelCase : Optional[int] = val[dim : dim * 2, :] _lowerCamelCase : List[Any] = val[-dim:, :] else: _lowerCamelCase : int = val[:dim] _lowerCamelCase : str = val[ dim : dim * 2 ] _lowerCamelCase : int = val[-dim:] else: _lowerCamelCase : Tuple = val return orig_state_dict def A__ ( __A , __A ): '''simple docstring''' _lowerCamelCase : List[str] = timm.create_model(a_ , pretrained=a_ ) timm_model.eval() _lowerCamelCase : List[Any] = get_swinva_config(a_ ) _lowerCamelCase : int = SwinvaForImageClassification(a_ ) model.eval() _lowerCamelCase : Tuple = convert_state_dict(timm_model.state_dict() , a_ ) model.load_state_dict(a_ ) _lowerCamelCase : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' _lowerCamelCase : Dict = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swinva_name.replace("""_""" , """-""" ) ) ) _lowerCamelCase : Tuple = Image.open(requests.get(a_ , stream=a_ ).raw ) _lowerCamelCase : Tuple = image_processor(images=a_ , return_tensors="""pt""" ) _lowerCamelCase : Dict = timm_model(inputs["""pixel_values"""] ) _lowerCamelCase : Dict = model(**a_ ).logits assert torch.allclose(a_ , a_ , atol=1E-3 ) print(F"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(a_ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(a_ ) model.push_to_hub( repo_path_or_name=Path(a_ , a_ ) , organization="""nandwalritik""" , commit_message="""Add model""" , ) if __name__ == "__main__": lowerCAmelCase : Dict =argparse.ArgumentParser() # Required parameters parser.add_argument( "--swinv2_name", default="swinv2_tiny_patch4_window8_256", type=str, help="Name of the Swinv2 timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) lowerCAmelCase : List[Any] =parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
717
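The heart of convert_state_dict is the standard fused-QKV split: timm stores one (3*dim, dim) projection, while the HF attention module expects separate query/key/value tensors carved out in thirds. In isolation:

import torch

dim = 32
qkv_weight = torch.randn(3 * dim, dim)  # fused projection as stored by timm
query_w = qkv_weight[:dim, :]
key_w = qkv_weight[dim : dim * 2, :]
value_w = qkv_weight[-dim:, :]
assert torch.equal(torch.cat([query_w, key_w, value_w]), qkv_weight)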
import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase : Dict =logging.get_logger(__name__) lowerCAmelCase : Dict ={"vocab_file": "vocab.json"} lowerCAmelCase : List[str] ={ "vocab_file": { "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json", } } lowerCAmelCase : int ={"mgp-str": 27} class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = VOCAB_FILES_NAMES _snake_case = PRETRAINED_VOCAB_FILES_MAP _snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : int="[GO]" , _UpperCamelCase : Any="[GO]" , _UpperCamelCase : Optional[Any]="[s]" , _UpperCamelCase : List[str]="[GO]" , **_UpperCamelCase : Dict) ->Union[str, Any]: """simple docstring""" super().__init__( unk_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , pad_token=_UpperCamelCase , **_UpperCamelCase , ) with open(_UpperCamelCase , encoding="""utf-8""") as vocab_handle: _lowerCamelCase : Optional[Any] = json.load(_UpperCamelCase) _lowerCamelCase : Optional[Any] = {v: k for k, v in self.vocab.items()} @property def _SCREAMING_SNAKE_CASE ( self : str) ->Any: """simple docstring""" return len(self.vocab) def _SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]: """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Union[str, Any]) ->Any: """simple docstring""" _lowerCamelCase : Tuple = [] for s in text: char_tokens.extend(_UpperCamelCase) return char_tokens def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : int) ->Optional[int]: """simple docstring""" return self.vocab.get(_UpperCamelCase , self.vocab.get(self.unk_token)) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[Any]) ->Dict: """simple docstring""" return self.decoder.get(_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None) ->Tuple[str]: """simple docstring""" if not os.path.isdir(_UpperCamelCase): logger.error("""Vocabulary path ({}) should be a directory""".format(_UpperCamelCase)) return _lowerCamelCase : Tuple = os.path.join( _UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]) with open(_UpperCamelCase , """w""" , encoding="""utf-8""") as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=_UpperCamelCase , ensure_ascii=_UpperCamelCase) + """\n""") return (vocab_file,)
15
0
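The tokenizer above is purely character-level: _tokenize splits the string into characters and vocabulary lookups fall back to the unk token. Stripped to its essence (the toy vocab is an assumption):

vocab = {"[GO]": 0, "a": 1, "b": 2, "c": 3}
unk_id = vocab["[GO]"]

def char_tokenize(text: str) -> list[int]:
    # One token per character; unknown characters map to the unk id.
    return [vocab.get(ch, unk_id) for ch in text]

assert char_tokenize("abz") == [1, 2, 0]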
def ugly_numbers(n: int) -> int:
    """Return the nth ugly number (positive integers whose only prime factors are 2, 3 or 5)."""
    ugly_nums = [1]
    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
718
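The three pointers advance independently, so a duplicate candidate such as 6 = 2*3 bumps every matching pointer in the same iteration and is appended only once. The sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12:

assert ugly_numbers(1) == 1
assert ugly_numbers(10) == 12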
import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
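# A minimal usage sketch of the helper exercised above (assumes transformers
# is installed; behavior mirrors the "out features set to match out indices"
# case in the test):
#
#   from transformers.utils.backbone_utils import get_aligned_output_features_output_indices
#
#   stage_names = ["stem", "stage1", "stage2"]
#   features, indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
#   print(features, indices)  # ['stem', 'stage2'] [0, 2]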
15
0
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as a dictionary of adjacency lists; the source
        vertex has to be defined upon initialization."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        """Runs breadth-first search from the source vertex, filling in the
        parent mapping used by shortest_path."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Returns the shortest path from the source vertex to the target
        vertex as a string of the form `v1->v2->...->vn`."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
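# Note on the implementation above: queue.pop(0) is O(n) per dequeue. A
# drop-in sketch (not part of the original file) using collections.deque
# keeps each dequeue O(1):
from collections import deque


def bfs_parents(graph: dict[str, list[str]], source: str) -> dict[str, str | None]:
    parent: dict[str, str | None] = {source: None}
    queue = deque([source])
    while queue:
        vertex = queue.popleft()
        for neighbour in graph[vertex]:
            if neighbour not in parent:
                parent[neighbour] = vertex
                queue.append(neighbour)
    return parent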
719
import math


def is_prime(number: int) -> bool:
    """Trial-division primality test: checks divisibility by 2 and then by
    every odd number up to sqrt(number)."""
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Returns the first prime at or above factor * value; if that product is
    itself prime, recurses to find the next prime strictly above it. Pass
    desc=True to scan downward instead."""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
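# Hedged usage sketch for the two helpers above: is_prime does trial division
# by odd numbers up to sqrt(n); next_prime scans upward (or downward with
# desc=True) from factor * value and, if that starting point is already
# prime, returns the next prime after it.
if __name__ == "__main__":
    assert is_prime(2) and is_prime(97) and not is_prime(1)
    assert next_prime(14) == 17  # 14, 15 and 16 are composite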
15
0
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu lowerCAmelCase : Optional[Any] =False class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Any: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _SCREAMING_SNAKE_CASE ( self : Dict) ->Any: """simple docstring""" return 12 @property def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Dict: """simple docstring""" return 12 @property def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[Any]: """simple docstring""" return 32 @property def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[Any]: """simple docstring""" torch.manual_seed(0) _lowerCamelCase : List[Any] = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]: """simple docstring""" _lowerCamelCase : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""") return tokenizer @property def _SCREAMING_SNAKE_CASE ( self : List[str]) ->int: """simple docstring""" torch.manual_seed(0) _lowerCamelCase : Dict = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModel(_lowercase) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str: """simple docstring""" torch.manual_seed(0) _lowerCamelCase : Union[str, Any] = 12 _lowerCamelCase : Tuple = 12 _lowerCamelCase : Tuple = { """attention_bias""": True, """cross_attention_dim""": 32, """attention_head_dim""": height * width, """num_attention_heads""": 1, """num_vector_embeds""": self.num_embed, """num_embeds_ada_norm""": self.num_embeds_ada_norm, """norm_num_groups""": 32, """sample_size""": width, """activation_fn""": """geglu-approximate""", } _lowerCamelCase : Optional[Any] = TransformeraDModel(**_lowercase) return model def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[int]: """simple docstring""" _lowerCamelCase : str = """cpu""" _lowerCamelCase : List[str] = self.dummy_vqvae _lowerCamelCase : Any = self.dummy_text_encoder _lowerCamelCase : Tuple = self.dummy_tokenizer _lowerCamelCase : int = self.dummy_transformer _lowerCamelCase : int = VQDiffusionScheduler(self.num_embed) _lowerCamelCase : Dict = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowercase) _lowerCamelCase : Optional[Any] = VQDiffusionPipeline( vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , ) _lowerCamelCase : int = pipe.to(_lowercase) pipe.set_progress_bar_config(disable=_lowercase) _lowerCamelCase : List[Any] = """teddy bear playing in the pool""" _lowerCamelCase : Dict = 
torch.Generator(device=_lowercase).manual_seed(0) _lowerCamelCase : List[Any] = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="""np""") _lowerCamelCase : Optional[int] = output.images _lowerCamelCase : List[Any] = torch.Generator(device=_lowercase).manual_seed(0) _lowerCamelCase : Dict = pipe( [prompt] , generator=_lowercase , output_type="""np""" , return_dict=_lowercase , num_inference_steps=2)[0] _lowerCamelCase : List[Any] = image[0, -3:, -3:, -1] _lowerCamelCase : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) _lowerCamelCase : Dict = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2 def _SCREAMING_SNAKE_CASE ( self : Tuple) ->str: """simple docstring""" _lowerCamelCase : int = """cpu""" _lowerCamelCase : List[Any] = self.dummy_vqvae _lowerCamelCase : Optional[int] = self.dummy_text_encoder _lowerCamelCase : List[Any] = self.dummy_tokenizer _lowerCamelCase : Union[str, Any] = self.dummy_transformer _lowerCamelCase : str = VQDiffusionScheduler(self.num_embed) _lowerCamelCase : List[Any] = LearnedClassifierFreeSamplingEmbeddings( learnable=_lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length) _lowerCamelCase : Union[str, Any] = VQDiffusionPipeline( vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , ) _lowerCamelCase : Any = pipe.to(_lowercase) pipe.set_progress_bar_config(disable=_lowercase) _lowerCamelCase : Tuple = """teddy bear playing in the pool""" _lowerCamelCase : str = torch.Generator(device=_lowercase).manual_seed(0) _lowerCamelCase : Tuple = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="""np""") _lowerCamelCase : Dict = output.images _lowerCamelCase : Union[str, Any] = torch.Generator(device=_lowercase).manual_seed(0) _lowerCamelCase : Any = pipe( [prompt] , generator=_lowercase , output_type="""np""" , return_dict=_lowercase , num_inference_steps=2)[0] _lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1] _lowerCamelCase : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) _lowerCamelCase : int = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8]) assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2 @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]: """simple docstring""" _lowerCamelCase : List[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""") _lowerCamelCase : str = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""") _lowerCamelCase : Optional[Any] = pipeline.to(_lowercase) pipeline.set_progress_bar_config(disable=_lowercase) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though _lowerCamelCase : Any = 
torch.Generator(device=_lowercase).manual_seed(0) _lowerCamelCase : Optional[int] = pipeline( """teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=_lowercase , output_type="""np""" , ) _lowerCamelCase : Union[str, Any] = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image).max() < 2.0
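# A minimal end-user sketch of the pipeline class exercised above, mirroring
# the slow test (assumes the microsoft/vq-diffusion-ithq checkpoint is
# downloadable; two inference steps just to keep the run short):
#
#   import torch
#   from diffusers import VQDiffusionPipeline
#
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   generator = torch.Generator().manual_seed(0)
#   out = pipe("teddy bear playing in the pool", generator=generator,
#              num_inference_steps=2, output_type="np")
#   print(out.images[0].shape)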
720
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
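# Hedged usage sketch of the pipeline defined above via the high-level
# factory (the checkpoint id is an assumption; any image-classification
# checkpoint on the Hub should work):
#
#   from transformers import pipeline
#
#   classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
#   preds = classifier("http://images.cocodataset.org/val2017/000000039769.jpg", top_k=2)
#   print(preds)  # [{"score": ..., "label": ...}, {"score": ..., "label": ...}]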
15
0
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __snake_case : '''simple docstring''' def __init__( self : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any]=13 , _UpperCamelCase : Optional[Any]=30 , _UpperCamelCase : List[Any]=2 , _UpperCamelCase : Dict=3 , _UpperCamelCase : str=True , _UpperCamelCase : Any=True , _UpperCamelCase : List[str]=32 , _UpperCamelCase : Optional[Any]=2 , _UpperCamelCase : int=4 , _UpperCamelCase : Optional[Any]=37 , _UpperCamelCase : Union[str, Any]="gelu" , _UpperCamelCase : str=0.1 , _UpperCamelCase : str=0.1 , _UpperCamelCase : Tuple=10 , _UpperCamelCase : Tuple=0.0_2 , _UpperCamelCase : Optional[int]=3 , _UpperCamelCase : Optional[Any]=0.6 , _UpperCamelCase : Dict=None , ) ->List[str]: """simple docstring""" _lowerCamelCase : Optional[int] = parent _lowerCamelCase : Optional[Any] = batch_size _lowerCamelCase : Any = image_size _lowerCamelCase : Union[str, Any] = patch_size _lowerCamelCase : Optional[int] = num_channels _lowerCamelCase : Optional[int] = is_training _lowerCamelCase : Dict = use_labels _lowerCamelCase : List[Any] = hidden_size _lowerCamelCase : str = num_hidden_layers _lowerCamelCase : Dict = num_attention_heads _lowerCamelCase : str = intermediate_size _lowerCamelCase : str = hidden_act _lowerCamelCase : List[str] = hidden_dropout_prob _lowerCamelCase : Any = attention_probs_dropout_prob _lowerCamelCase : List[Any] = type_sequence_label_size _lowerCamelCase : Optional[Any] = initializer_range _lowerCamelCase : List[str] = mask_ratio _lowerCamelCase : Optional[int] = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) _lowerCamelCase : Any = (image_size // patch_size) ** 2 _lowerCamelCase : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1))) def _SCREAMING_SNAKE_CASE ( self : int) ->str: """simple docstring""" _lowerCamelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _lowerCamelCase : Union[str, Any] = None if self.use_labels: _lowerCamelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size) _lowerCamelCase : Union[str, Any] = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , 
hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : int , _UpperCamelCase : str , _UpperCamelCase : Union[str, Any]) ->Tuple: """simple docstring""" _lowerCamelCase : int = TFViTMAEModel(config=A_) _lowerCamelCase : List[Any] = model(A_ , training=A_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str]) ->Optional[int]: """simple docstring""" _lowerCamelCase : Any = TFViTMAEForPreTraining(A_) _lowerCamelCase : Any = model(A_ , training=A_) # expected sequence length = num_patches _lowerCamelCase : Optional[Any] = (self.image_size // self.patch_size) ** 2 _lowerCamelCase : Optional[Any] = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels)) # test greyscale images _lowerCamelCase : Dict = 1 _lowerCamelCase : Tuple = TFViTMAEForPreTraining(A_) _lowerCamelCase : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) _lowerCamelCase : Dict = model(A_ , training=A_) _lowerCamelCase : List[Any] = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels)) def _SCREAMING_SNAKE_CASE ( self : Dict) ->int: """simple docstring""" _lowerCamelCase : Any = self.prepare_config_and_inputs() ((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) : Optional[Any] = config_and_inputs _lowerCamelCase : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class __snake_case ( _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _snake_case = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () _snake_case = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {} _snake_case = False _snake_case = False _snake_case = False _snake_case = False def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[int]: """simple docstring""" _lowerCamelCase : Dict = TFViTMAEModelTester(self) _lowerCamelCase : Dict = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37) def _SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""") def _SCREAMING_SNAKE_CASE ( self : Any) ->Any: """simple docstring""" pass def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Any = model_class(A_) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer)) _lowerCamelCase : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A_ , tf.keras.layers.Layer)) def _SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[int]: """simple docstring""" _lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Optional[Any] = model_class(A_) _lowerCamelCase : List[Any] = 
inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : Any = [*signature.parameters.keys()] _lowerCamelCase : List[str] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , A_) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]: """simple docstring""" _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->int: """simple docstring""" _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*A_) def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]: """simple docstring""" np.random.seed(2) _lowerCamelCase , _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase : Tuple = int((config.image_size // config.patch_size) ** 2) _lowerCamelCase : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) for model_class in self.all_model_classes: _lowerCamelCase : Any = model_class(A_) _lowerCamelCase : List[Any] = self._prepare_for_class(A_ , A_) _lowerCamelCase : Optional[int] = model(A_ , noise=A_) _lowerCamelCase : List[Any] = copy.deepcopy(self._prepare_for_class(A_ , A_)) _lowerCamelCase : Union[str, Any] = model(**A_ , noise=A_) _lowerCamelCase : str = outputs_dict[0].numpy() _lowerCamelCase : Dict = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords)) , 1E-6) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Dict: """simple docstring""" np.random.seed(2) _lowerCamelCase , _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase : List[Any] = int((config.image_size // config.patch_size) ** 2) _lowerCamelCase : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) def prepare_numpy_arrays(_UpperCamelCase : Union[str, Any]): _lowerCamelCase : List[str] = {} for k, v in inputs_dict.items(): if tf.is_tensor(A_): _lowerCamelCase : int = v.numpy() else: _lowerCamelCase : List[Any] = np.array(A_) return inputs_np_dict for model_class in self.all_model_classes: _lowerCamelCase : List[str] = model_class(A_) _lowerCamelCase : str = self._prepare_for_class(A_ , A_) _lowerCamelCase : Tuple = prepare_numpy_arrays(A_) _lowerCamelCase : List[str] = model(A_ , noise=A_) _lowerCamelCase : Tuple = model(**A_ , noise=A_) self.assert_outputs_same(A_ , A_) def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[Any]) ->Dict: """simple docstring""" np.random.seed(2) _lowerCamelCase : Tuple = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2) _lowerCamelCase : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) _lowerCamelCase : str = tf.constant(A_) # Add `noise` argument. 
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument _lowerCamelCase : str = tf_noise super().check_pt_tf_models(A_ , A_ , A_) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: """simple docstring""" np.random.seed(2) _lowerCamelCase , _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase : Dict = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__),) for module_member_name in dir(A_) if module_member_name.endswith("""MainLayer""") # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. and module_member_name[: -len("""MainLayer""")] == model_class.__name__[: -len("""Model""")] for module_member in (getattr(A_ , A_),) if isinstance(A_ , A_) and tf.keras.layers.Layer in module_member.__bases__ and getattr(A_ , """_keras_serializable""" , A_) } _lowerCamelCase : List[str] = int((config.image_size // config.patch_size) ** 2) _lowerCamelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) _lowerCamelCase : List[str] = tf.convert_to_tensor(A_) inputs_dict.update({"""noise""": noise}) for main_layer_class in tf_main_layer_classes: _lowerCamelCase : Optional[int] = main_layer_class(A_) _lowerCamelCase : Optional[int] = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype) for name, tensor in inputs_dict.items() } _lowerCamelCase : Tuple = tf.keras.Model(A_ , outputs=main_layer(A_)) _lowerCamelCase : Tuple = model(A_) with tempfile.TemporaryDirectory() as tmpdirname: _lowerCamelCase : Any = os.path.join(A_ , """keras_model.h5""") model.save(A_) _lowerCamelCase : int = tf.keras.models.load_model( A_ , custom_objects={main_layer_class.__name__: main_layer_class}) assert isinstance(A_ , tf.keras.Model) _lowerCamelCase : str = model(A_) self.assert_outputs_same(A_ , A_) @slow def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" np.random.seed(2) _lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase : Optional[Any] = int((config.image_size // config.patch_size) ** 2) _lowerCamelCase : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) for model_class in self.all_model_classes: _lowerCamelCase : Union[str, Any] = model_class(A_) _lowerCamelCase : int = self._prepare_for_class(A_ , A_) _lowerCamelCase : Any = model(A_ , noise=A_) if model_class.__name__ == "TFViTMAEModel": _lowerCamelCase : List[Any] = outputs.last_hidden_state.numpy() _lowerCamelCase : Union[str, Any] = 0 else: _lowerCamelCase : Optional[int] = outputs.logits.numpy() _lowerCamelCase : Dict = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(A_ , saved_model=A_) _lowerCamelCase : Optional[Any] = model_class.from_pretrained(A_) _lowerCamelCase : Dict = model(A_ , noise=A_) if model_class.__name__ == "TFViTMAEModel": _lowerCamelCase : Any = after_outputs["""last_hidden_state"""].numpy() _lowerCamelCase : List[str] = 0 else: _lowerCamelCase : Optional[int] = after_outputs["""logits"""].numpy() _lowerCamelCase : Any = 0 _lowerCamelCase : str = np.amax(np.abs(out_a - out_a)) self.assertLessEqual(A_ , 1E-5) def _SCREAMING_SNAKE_CASE ( self : str) ->Any: """simple docstring""" np.random.seed(2) _lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase : Optional[Any] = int((config.image_size // 
config.patch_size) ** 2) _lowerCamelCase : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) for model_class in self.all_model_classes: _lowerCamelCase : Union[str, Any] = model_class(A_) _lowerCamelCase : int = self._prepare_for_class(A_ , A_) _lowerCamelCase : Dict = model(A_ , noise=A_) _lowerCamelCase : int = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(A_) _lowerCamelCase : Any = model_class.from_config(model.get_config()) # make sure it also accepts a normal config _lowerCamelCase : str = model_class.from_config(model.config) _lowerCamelCase : List[Any] = new_model(A_) # Build model new_model.set_weights(model.get_weights()) _lowerCamelCase : Optional[Any] = new_model(A_ , noise=A_) self.assert_outputs_same(A_ , A_) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.""") def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[Any]: """simple docstring""" pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""") def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Dict: """simple docstring""" pass @slow def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]: """simple docstring""" _lowerCamelCase : List[str] = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""") self.assertIsNotNone(A_) def A__ ( ): '''simple docstring''' _lowerCamelCase : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class __snake_case ( unittest.TestCase ): '''simple docstring''' @cached_property def _SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]: """simple docstring""" return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""") if is_vision_available() else None @slow def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[int]: """simple docstring""" np.random.seed(2) _lowerCamelCase : Tuple = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""") _lowerCamelCase : Dict = self.default_image_processor _lowerCamelCase : Tuple = prepare_img() _lowerCamelCase : Union[str, Any] = image_processor(images=A_ , return_tensors="""tf""") # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) _lowerCamelCase : str = ViTMAEConfig() _lowerCamelCase : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2) _lowerCamelCase : int = np.random.uniform(size=(1, num_patches)) # forward pass _lowerCamelCase : Optional[Any] = model(**A_ , noise=A_) # verify the logits _lowerCamelCase : Tuple = tf.convert_to_tensor([1, 196, 768]) self.assertEqual(outputs.logits.shape , A_) _lowerCamelCase : List[str] = tf.convert_to_tensor( [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]]) tf.debugging.assert_near(outputs.logits[0, :3, :3] , A_ , atol=1E-4)
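# A worked check of the masked sequence length used throughout the tester
# above (a sketch mirroring the formula in the model tester's constructor):
import math

image_size, patch_size, mask_ratio = 30, 2, 0.6
num_patches = (image_size // patch_size) ** 2  # 225 patches
# +1 accounts for the [CLS] token; ViTMAE keeps (1 - mask_ratio) of the sequence
seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
assert (num_patches, seq_length) == (225, 91)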
721
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class __snake_case ( unittest.TestCase ): '''simple docstring''' _snake_case = ViTImageProcessor if is_vision_available() else None @property def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = (3, 32, 128) _lowerCamelCase : str = tempfile.mkdtemp() # fmt: off _lowerCamelCase : Dict = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on _lowerCamelCase : str = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase)))) _lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp: fp.write(json.dumps(_UpperCamelCase) + """\n""") _lowerCamelCase : Any = { """do_normalize""": False, """do_resize""": True, """image_processor_type""": """ViTImageProcessor""", """resample""": 3, """size""": {"""height""": 32, """width""": 128}, } _lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , _UpperCamelCase) with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp: json.dump(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **_UpperCamelCase : Any) ->Tuple: """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Dict , **_UpperCamelCase : Optional[Any]) ->List[Any]: """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: """simple docstring""" shutil.rmtree(self.tmpdirname) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: """simple docstring""" _lowerCamelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta) _lowerCamelCase : int = Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1)) return image_input def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" _lowerCamelCase : List[str] = self.get_tokenizer() _lowerCamelCase : Tuple = self.get_image_processor() _lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) processor.save_pretrained(self.tmpdirname) _lowerCamelCase : int = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCamelCase) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab()) self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string()) 
self.assertIsInstance(processor.image_processor , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict: """simple docstring""" _lowerCamelCase : Dict = self.get_tokenizer() _lowerCamelCase : Optional[Any] = self.get_image_processor() _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) processor.save_pretrained(self.tmpdirname) _lowerCamelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""") _lowerCamelCase : Union[str, Any] = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0) _lowerCamelCase : Tuple = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCamelCase , padding_value=1.0) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any) ->int: """simple docstring""" _lowerCamelCase : int = self.get_image_processor() _lowerCamelCase : int = self.get_tokenizer() _lowerCamelCase : List[str] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : List[str] = self.prepare_image_inputs() _lowerCamelCase : Optional[int] = image_processor(_UpperCamelCase , return_tensors="""np""") _lowerCamelCase : int = processor(images=_UpperCamelCase , return_tensors="""np""") for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]: """simple docstring""" _lowerCamelCase : List[Any] = self.get_image_processor() _lowerCamelCase : int = self.get_tokenizer() _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : Optional[int] = """test""" _lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase) _lowerCamelCase : Dict = tokenizer(_UpperCamelCase) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.get_image_processor() _lowerCamelCase : List[Any] = self.get_tokenizer() _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : Any = """test""" _lowerCamelCase : List[str] = self.prepare_image_inputs() _lowerCamelCase : int = processor(text=_UpperCamelCase , images=_UpperCamelCase) self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""]) # test if it raises when no input is passed with pytest.raises(_UpperCamelCase): processor() def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.get_image_processor() _lowerCamelCase : List[str] = self.get_tokenizer() _lowerCamelCase : Dict = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] _lowerCamelCase : Any = processor.char_decode(_UpperCamelCase) _lowerCamelCase : Tuple = tokenizer.batch_decode(_UpperCamelCase) _lowerCamelCase : List[str] = [seq.replace(""" """ , """""") for seq in decoded_tok] 
self.assertListEqual(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str: """simple docstring""" _lowerCamelCase : Dict = self.get_image_processor() _lowerCamelCase : str = self.get_tokenizer() _lowerCamelCase : List[Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : int = None _lowerCamelCase : Union[str, Any] = self.prepare_image_inputs() _lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase , images=_UpperCamelCase) self.assertListEqual(list(inputs.keys()) , processor.model_input_names) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : List[str] = self.get_image_processor() _lowerCamelCase : int = self.get_tokenizer() _lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : Any = torch.randn(1 , 27 , 38) _lowerCamelCase : List[Any] = torch.randn(1 , 27 , 5_0257) _lowerCamelCase : List[str] = torch.randn(1 , 27 , 3_0522) _lowerCamelCase : int = processor.batch_decode([char_input, bpe_input, wp_input]) self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
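# Hedged usage sketch of the processor tested above (the checkpoint id is an
# assumption based on the vocab URL used by this model family's tokenizer):
#
#   from PIL import Image
#   from transformers import MgpstrProcessor
#
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   pixel_values = processor(images=Image.open("word_crop.png"), return_tensors="pt").pixel_values
#   print(pixel_values.shape)  # expected (1, 3, 32, 128) after the 32x128 resize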
15
0
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
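# A minimal sketch instantiating the config above with its defaults:
if __name__ == "__main__":
    config = RetriBertConfig()
    print(config.hidden_size, config.num_hidden_layers, config.projection_dim)  # 768 8 128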
700
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
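# Example invocation (a sketch; the launcher filename is a placeholder, the
# flags are exactly those defined by the parser above). Everything after the
# training script path is forwarded untouched via args.training_script_args:
#
#   python tpu_launcher.py --num_cores 8 path/to/train_script.py --per_device_batch_size 8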
15
0
import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def A__ ( __A , __A ): '''simple docstring''' _lowerCamelCase : Dict = old_name if "patch_embed" in old_name: _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[int] = old_name.split(""".""" ) if layer == "0": _lowerCamelCase : Optional[int] = old_name.replace("""0""" , """convolution1""" ) elif layer == "1": _lowerCamelCase : Dict = old_name.replace("""1""" , """batchnorm_before""" ) elif layer == "3": _lowerCamelCase : Optional[int] = old_name.replace("""3""" , """convolution2""" ) else: _lowerCamelCase : Any = old_name.replace("""4""" , """batchnorm_after""" ) if "network" in old_name and re.search(r"""\d\.\d""" , __A ): _lowerCamelCase : Optional[Any] = r"""\b\d{2}\b""" if bool(re.search(__A , __A ) ): _lowerCamelCase : int = re.search(r"""\d\.\d\d.""" , __A ).group() else: _lowerCamelCase : Dict = re.search(r"""\d\.\d.""" , __A ).group() if int(match[0] ) < 6: _lowerCamelCase : Union[str, Any] = old_name.replace(__A , """""" ) _lowerCamelCase : int = trimmed_name.replace("""network""" , match[0] + """.meta4D_layers.blocks.""" + match[2:-1] ) _lowerCamelCase : int = """intermediate_stages.""" + trimmed_name else: _lowerCamelCase : int = old_name.replace(__A , """""" ) if int(match[2] ) < num_meta4D_last_stage: _lowerCamelCase : Optional[Any] = trimmed_name.replace("""network""" , """meta4D_layers.blocks.""" + match[2] ) else: _lowerCamelCase : int = str(int(match[2] ) - num_meta4D_last_stage ) _lowerCamelCase : Tuple = trimmed_name.replace("""network""" , """meta3D_layers.blocks.""" + layer_index ) if "norm1" in old_name: _lowerCamelCase : Optional[int] = trimmed_name.replace("""norm1""" , """layernorm1""" ) elif "norm2" in old_name: _lowerCamelCase : List[str] = trimmed_name.replace("""norm2""" , """layernorm2""" ) elif "fc1" in old_name: _lowerCamelCase : Optional[Any] = trimmed_name.replace("""fc1""" , """linear_in""" ) elif "fc2" in old_name: _lowerCamelCase : str = trimmed_name.replace("""fc2""" , """linear_out""" ) _lowerCamelCase : Dict = """last_stage.""" + trimmed_name elif "network" in old_name and re.search(r""".\d.""" , __A ): _lowerCamelCase : str = old_name.replace("""network""" , """intermediate_stages""" ) if "fc" in new_name: _lowerCamelCase : List[str] = new_name.replace("""fc""" , """convolution""" ) elif ("norm1" in new_name) and ("layernorm1" not in new_name): _lowerCamelCase : List[str] = new_name.replace("""norm1""" , """batchnorm_before""" ) elif ("norm2" in new_name) and ("layernorm2" not in new_name): _lowerCamelCase : Optional[int] = new_name.replace("""norm2""" , """batchnorm_after""" ) if "proj" in new_name: _lowerCamelCase : Dict = new_name.replace("""proj""" , """projection""" ) if "dist_head" in new_name: _lowerCamelCase : List[Any] = new_name.replace("""dist_head""" , """distillation_classifier""" ) elif "head" in new_name: _lowerCamelCase : List[Any] = new_name.replace("""head""" , """classifier""" ) elif "patch_embed" in new_name: _lowerCamelCase : List[str] = """efficientformer.""" + new_name elif new_name == "norm.weight" or new_name == "norm.bias": _lowerCamelCase : Optional[int] = new_name.replace("""norm""" , 
"""layernorm""" ) _lowerCamelCase : Any = """efficientformer.""" + new_name else: _lowerCamelCase : int = """efficientformer.encoder.""" + new_name return new_name def A__ ( __A , __A ): '''simple docstring''' for key in checkpoint.copy().keys(): _lowerCamelCase : List[str] = checkpoint.pop(__A ) _lowerCamelCase : List[Any] = val return checkpoint def A__ ( ): '''simple docstring''' _lowerCamelCase : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" _lowerCamelCase : Optional[int] = Image.open(requests.get(__A , stream=__A ).raw ) return image def A__ ( __A , __A , __A , __A ): '''simple docstring''' _lowerCamelCase : List[Any] = torch.load(__A , map_location="""cpu""" )["""model"""] _lowerCamelCase : str = EfficientFormerConfig.from_json_file(__A ) _lowerCamelCase : int = EfficientFormerForImageClassificationWithTeacher(__A ) _lowerCamelCase : Optional[Any] = """_""".join(checkpoint_path.split("""/""" )[-1].split(""".""" )[0].split("""_""" )[:-1] ) _lowerCamelCase : Union[str, Any] = config.depths[-1] - config.num_metaad_blocks + 1 _lowerCamelCase : int = convert_torch_checkpoint(__A , __A ) model.load_state_dict(__A ) model.eval() _lowerCamelCase : Tuple = { """bilinear""": PILImageResampling.BILINEAR, """bicubic""": PILImageResampling.BICUBIC, """nearest""": PILImageResampling.NEAREST, } # prepare image _lowerCamelCase : str = prepare_img() _lowerCamelCase : Optional[Any] = 256 _lowerCamelCase : Tuple = 224 _lowerCamelCase : Optional[Any] = EfficientFormerImageProcessor( size={"""shortest_edge""": image_size} , crop_size={"""height""": crop_size, """width""": crop_size} , resample=pillow_resamplings["""bicubic"""] , ) _lowerCamelCase : Union[str, Any] = processor(images=__A , return_tensors="""pt""" ).pixel_values # original processing pipeline _lowerCamelCase : Optional[Any] = Compose( [ Resize(__A , interpolation=pillow_resamplings["""bicubic"""] ), CenterCrop(__A ), ToTensor(), Normalize(__A , __A ), ] ) _lowerCamelCase : int = image_transforms(__A ).unsqueeze(0 ) assert torch.allclose(__A , __A ) _lowerCamelCase : Dict = model(__A ) _lowerCamelCase : Any = outputs.logits _lowerCamelCase : List[Any] = (1, 1_000) if "l1" in model_name: _lowerCamelCase : Optional[int] = torch.Tensor( [-0.1_312, 0.4_353, -1.0_499, -0.5_124, 0.4_183, -0.6_793, -1.3_777, -0.0_893, -0.7_358, -2.4_328] ) assert torch.allclose(logits[0, :10] , __A , atol=1E-3 ) assert logits.shape == expected_shape elif "l3" in model_name: _lowerCamelCase : Tuple = torch.Tensor( [-1.3_150, -1.5_456, -1.2_556, -0.8_496, -0.7_127, -0.7_897, -0.9_728, -0.3_052, 0.3_751, -0.3_127] ) assert torch.allclose(logits[0, :10] , __A , atol=1E-3 ) assert logits.shape == expected_shape elif "l7" in model_name: _lowerCamelCase : Dict = torch.Tensor( [-1.0_283, -1.4_131, -0.5_644, -1.3_115, -0.5_785, -1.2_049, -0.7_528, 0.1_992, -0.3_822, -0.0_878] ) assert logits.shape == expected_shape else: raise ValueError( F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" ) # Save Checkpoints Path(__A ).mkdir(exist_ok=__A ) model.save_pretrained(__A ) print(F"""Checkpoint successfuly converted. 
Model saved at {pytorch_dump_path}""" ) processor.save_pretrained(__A ) print(F"""Processor successfuly saved at {pytorch_dump_path}""" ) if push_to_hub: print("""Pushing model to the hub...""" ) model.push_to_hub( repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="""Add model""" , use_temp_dir=__A , ) processor.push_to_hub( repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="""Add image processor""" , use_temp_dir=__A , ) if __name__ == "__main__": lowerCAmelCase : Union[str, Any] =argparse.ArgumentParser() # Required parameters parser.add_argument( "--pytorch_model_path", default=None, type=str, required=True, help="Path to EfficientFormer pytorch checkpoint.", ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The json file for EfficientFormer model config.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub") parser.add_argument( "--no-push_to_hub", dest="push_to_hub", action="store_false", help="Do not push model and image processor to the hub", ) parser.set_defaults(push_to_hub=True) lowerCAmelCase : Dict =parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
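# Example invocation (a sketch; the script filename and all paths are
# placeholders, the flags are those defined by the argparse block above):
#
#   python convert_efficientformer.py \
#       --pytorch_model_path efficientformer_l1.pth \
#       --config_file efficientformer_l1_config.json \
#       --pytorch_dump_path efficientformer-l1 \
#       --no-push_to_hub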
701
def naive_cut_rod_recursive(n: int, prices: list):
    """Rod-cutting by naive recursion (exponential time): the maximum revenue
    obtainable from a rod of length n, given prices for pieces of length 1..n."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )

    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic programming) solution, O(n^2) time."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )

        max_rev[n] = max_revenue

    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up dynamic programming) solution, O(n^2) time."""
    _enforce_args(n, prices)

    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])

        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
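# A quick cross-check of the three strategies above on the classic CLRS price
# table (a sketch; the best cut of a length-4 rod is two pieces of length 2):
def _check_cut_rod() -> None:
    prices = [1, 5, 8, 9]
    assert (
        naive_cut_rod_recursive(4, prices)
        == top_down_cut_rod(4, prices)
        == bottom_up_cut_rod(4, prices)
        == 10
    )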
15
0
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """
    Character-level tokenizer for MGP-STR: every character of the input text
    becomes one token.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
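# Hedged usage sketch of the character-level tokenizer above (the checkpoint
# id is an assumption derived from the vocab URL in PRETRAINED_VOCAB_FILES_MAP):
#
#   from transformers import MgpstrTokenizer
#
#   tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
#   print(tokenizer("hello")["input_ids"])  # one id per character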
702
from __future__ import annotations


class XORCipher:
    def __init__(self, key: int = 0):
        """Simple constructor that receives a key or uses default key = 0."""
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """Encrypts 'content' with 'key', returning the result as a list of characters."""
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        """Decrypts 'content' with 'key', returning the result as a list of characters."""
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        """Encrypts 'content' with 'key', returning the result as a string."""
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        """Decrypts 'content' with 'key', returning the result as a string."""
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        """Encrypts the given file line by line into 'encrypt.out'."""
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))

        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        """Decrypts the given file line by line into 'decrypt.out'."""
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))

        except OSError:
            return False

        return True


# Tests
# crypt = XORCipher()
# key = 67

# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))

# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))

# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))

# if (crypt.encrypt_file("test.txt",key)):
#     print("encrypt successful")
# else:
#     print("encrypt unsuccessful")

# if (crypt.decrypt_file("encrypt.out",key)):
#     print("decrypt successful")
# else:
#     print("decrypt unsuccessful")
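# Round-trip property of the cipher above: XOR-ing with the same key twice is
# the identity (a sketch using the string helpers):
def _check_xor_roundtrip() -> None:
    cipher = XORCipher(key=67)
    secret = cipher.encrypt_string("hallo welt", 67)
    assert cipher.decrypt_string(secret, 67) == "hallo welt"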
15
0
import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class __snake_case ( nn.Module ): '''simple docstring''' def __init__( self : Optional[int]) ->Any: """simple docstring""" super().__init__() _lowerCamelCase : Any = nn.Linear(3 , 4) _lowerCamelCase : List[Any] = nn.BatchNormad(4) _lowerCamelCase : Any = nn.Linear(4 , 5) def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : Dict) ->Any: """simple docstring""" return self.lineara(self.batchnorm(self.lineara(_UpperCamelCase))) class __snake_case ( lowercase__ ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : Union[str, Any] , *_UpperCamelCase : Union[str, Any] , **_UpperCamelCase : List[Any]) ->Any: """simple docstring""" return (args[0] + 1,) + args[1:], kwargs class __snake_case ( lowercase__ ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : Tuple , _UpperCamelCase : Tuple) ->str: """simple docstring""" return output + 1 class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" _lowerCamelCase : List[str] = ModelForTest() _lowerCamelCase : Dict = ModelHook() add_hook_to_module(_UpperCamelCase , _UpperCamelCase) self.assertEqual(test_model._hf_hook , _UpperCamelCase) self.assertTrue(hasattr(_UpperCamelCase , """_old_forward""")) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , """forward""") self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ["""x"""]) remove_hook_from_module(_UpperCamelCase) self.assertFalse(hasattr(_UpperCamelCase , """_hf_hook""")) self.assertFalse(hasattr(_UpperCamelCase , """_old_forward""")) def _SCREAMING_SNAKE_CASE ( self : str) ->List[Any]: """simple docstring""" _lowerCamelCase : List[Any] = ModelForTest() _lowerCamelCase : Optional[Any] = ModelHook() add_hook_to_module(_UpperCamelCase , _UpperCamelCase) add_hook_to_module(_UpperCamelCase , _UpperCamelCase , append=_UpperCamelCase) self.assertEqual(isinstance(test_model._hf_hook , _UpperCamelCase) , _UpperCamelCase) self.assertEqual(len(test_model._hf_hook.hooks) , 2) self.assertTrue(hasattr(_UpperCamelCase , """_old_forward""")) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , """forward""") self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ["""x"""]) remove_hook_from_module(_UpperCamelCase) self.assertFalse(hasattr(_UpperCamelCase , """_hf_hook""")) self.assertFalse(hasattr(_UpperCamelCase , """_old_forward""")) def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]: """simple docstring""" _lowerCamelCase : Any = ModelForTest() _lowerCamelCase : Any = torch.randn(2 , 3) _lowerCamelCase : Any = test_model(x + 1) _lowerCamelCase : int = test_model(x + 2) _lowerCamelCase : List[str] = PreForwardHook() add_hook_to_module(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Optional[int] = test_model(_UpperCamelCase) self.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-5)) # Attaching a hook to a model when it already has one replaces, does not chain _lowerCamelCase : Dict = PreForwardHook() add_hook_to_module(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase 
: Tuple = test_model(_UpperCamelCase) self.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-5)) # You need to use the sequential hook to chain two or more hooks _lowerCamelCase : Optional[int] = SequentialHook(PreForwardHook() , PreForwardHook()) add_hook_to_module(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : str = test_model(_UpperCamelCase) assert torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-5) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Tuple: """simple docstring""" _lowerCamelCase : int = ModelForTest() _lowerCamelCase : Tuple = torch.randn(2 , 3) _lowerCamelCase : Tuple = test_model(_UpperCamelCase) _lowerCamelCase : str = PostForwardHook() add_hook_to_module(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Union[str, Any] = test_model(_UpperCamelCase) self.assertTrue(torch.allclose(_UpperCamelCase , output + 1 , atol=1E-5)) # Attaching a hook to a model when it already has one replaces, does not chain _lowerCamelCase : Tuple = PostForwardHook() add_hook_to_module(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Optional[Any] = test_model(_UpperCamelCase) self.assertTrue(torch.allclose(_UpperCamelCase , output + 1 , atol=1E-5)) # You need to use the sequential hook to chain two or more hooks _lowerCamelCase : List[str] = SequentialHook(PostForwardHook() , PostForwardHook()) add_hook_to_module(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Tuple = test_model(_UpperCamelCase) assert torch.allclose(_UpperCamelCase , output + 2 , atol=1E-5) def _SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]: """simple docstring""" _lowerCamelCase : str = ModelForTest() _lowerCamelCase : Tuple = torch.randn(2 , 3) _lowerCamelCase : int = test_model(_UpperCamelCase) _lowerCamelCase : Dict = PostForwardHook() add_hook_to_module(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : int = test_model(_UpperCamelCase) self.assertTrue(torch.allclose(_UpperCamelCase , output + 1)) self.assertTrue(outputa.requires_grad) _lowerCamelCase : Optional[Any] = True _lowerCamelCase : Optional[Any] = test_model(_UpperCamelCase) self.assertFalse(outputa.requires_grad) @require_multi_gpu def _SCREAMING_SNAKE_CASE ( self : int) ->Dict: """simple docstring""" _lowerCamelCase : Any = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""")) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""")) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""")) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0)) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0)) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1)) self.assertEqual(model.lineara.weight.device , torch.device(0)) self.assertEqual(model.batchnorm.weight.device , torch.device(0)) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0)) self.assertEqual(model.lineara.weight.device , torch.device(1)) # We can still make a forward pass. The input does not need to be on any particular device _lowerCamelCase : Any = torch.randn(2 , 3) _lowerCamelCase : Union[str, Any] = model(_UpperCamelCase) self.assertEqual(output.device , torch.device(1)) # We can add a general hook to put back output on same device as input. 
add_hook_to_module(_UpperCamelCase , AlignDevicesHook(io_same_device=_UpperCamelCase)) _lowerCamelCase : List[str] = torch.randn(2 , 3).to(0) _lowerCamelCase : Tuple = model(_UpperCamelCase) self.assertEqual(output.device , torch.device(0)) def _SCREAMING_SNAKE_CASE ( self : Tuple) ->List[Any]: """simple docstring""" _lowerCamelCase : Tuple = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""")) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""")) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""")) # This will move each submodule on different devices _lowerCamelCase : int = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True} add_hook_to_module(model.lineara , AlignDevicesHook(**_UpperCamelCase)) add_hook_to_module(model.batchnorm , AlignDevicesHook(**_UpperCamelCase)) add_hook_to_module(model.lineara , AlignDevicesHook(**_UpperCamelCase)) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""")) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""")) self.assertEqual(model.lineara.weight.device , torch.device("""meta""")) # Buffers are not included in the offload by default, so are on the execution device _lowerCamelCase : str = torch.device(hook_kwargs["""execution_device"""]) self.assertEqual(model.batchnorm.running_mean.device , _UpperCamelCase) _lowerCamelCase : int = torch.randn(2 , 3) _lowerCamelCase : str = model(_UpperCamelCase) self.assertEqual(output.device , _UpperCamelCase) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara) remove_hook_from_module(model.batchnorm) remove_hook_from_module(model.lineara) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""")) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""")) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""")) # Now test with buffers included in the offload _lowerCamelCase : Tuple = { "execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True, "offload_buffers": True, } add_hook_to_module(model.lineara , AlignDevicesHook(**_UpperCamelCase)) add_hook_to_module(model.batchnorm , AlignDevicesHook(**_UpperCamelCase)) add_hook_to_module(model.lineara , AlignDevicesHook(**_UpperCamelCase)) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""")) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""")) self.assertEqual(model.lineara.weight.device , torch.device("""meta""")) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""")) _lowerCamelCase : int = torch.randn(2 , 3) _lowerCamelCase : Tuple = model(_UpperCamelCase) self.assertEqual(output.device , _UpperCamelCase) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara) remove_hook_from_module(model.batchnorm) remove_hook_from_module(model.lineara) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""")) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""")) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""")) def _SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : int = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""")) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""")) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""")) # This will move each submodule on different devices _lowerCamelCase : Any = 0 if torch.cuda.is_available() else "cpu" attach_align_device_hook(_UpperCamelCase , execution_device=_UpperCamelCase , offload=_UpperCamelCase) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""")) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""")) self.assertEqual(model.lineara.weight.device , torch.device("""meta""")) # Buffers are not included in the offload by default, so are on the execution device _lowerCamelCase : Optional[int] = torch.device(_UpperCamelCase) self.assertEqual(model.batchnorm.running_mean.device , _UpperCamelCase) _lowerCamelCase : Any = torch.randn(2 , 3) _lowerCamelCase : Dict = model(_UpperCamelCase) self.assertEqual(output.device , _UpperCamelCase) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(_UpperCamelCase) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""")) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""")) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""")) # Now test with buffers included in the offload attach_align_device_hook(_UpperCamelCase , execution_device=_UpperCamelCase , offload=_UpperCamelCase , offload_buffers=_UpperCamelCase) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""")) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""")) self.assertEqual(model.lineara.weight.device , torch.device("""meta""")) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""")) _lowerCamelCase : Tuple = torch.randn(2 , 3) _lowerCamelCase : Any = model(_UpperCamelCase) self.assertEqual(output.device , _UpperCamelCase) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(_UpperCamelCase) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""")) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""")) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""")) def _SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]: """simple docstring""" _lowerCamelCase : Tuple = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""")) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""")) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""")) # This will move each submodule on different devices _lowerCamelCase : Union[str, Any] = 0 if torch.cuda.is_available() else "cpu" attach_align_device_hook( _UpperCamelCase , execution_device=_UpperCamelCase , offload=_UpperCamelCase , weights_map=model.state_dict()) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""")) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""")) self.assertEqual(model.lineara.weight.device , torch.device("""meta""")) # Buffers are not included in the offload by default, so are on the execution device _lowerCamelCase : str = torch.device(_UpperCamelCase) self.assertEqual(model.batchnorm.running_mean.device , _UpperCamelCase) _lowerCamelCase : Any = torch.randn(2 , 3) _lowerCamelCase : str = model(_UpperCamelCase) self.assertEqual(output.device , _UpperCamelCase) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(_UpperCamelCase) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""")) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""")) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""")) # Now test with buffers included in the offload attach_align_device_hook( _UpperCamelCase , execution_device=_UpperCamelCase , offload=_UpperCamelCase , weights_map=model.state_dict() , offload_buffers=_UpperCamelCase , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""")) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""")) self.assertEqual(model.lineara.weight.device , torch.device("""meta""")) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""")) _lowerCamelCase : Optional[int] = torch.randn(2 , 3) _lowerCamelCase : Dict = model(_UpperCamelCase) self.assertEqual(output.device , _UpperCamelCase) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(_UpperCamelCase) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""")) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""")) self.assertEqual(model.lineara.weight.device , torch.device("""cpu"""))
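# A standalone sketch (plain PyTorch, no accelerate) of the mechanism the hook
# tests above exercise: wrapping a module's forward so inputs and outputs can
# be rewritten without modifying the module itself.
import torch
from torch import nn

layer = nn.Linear(3, 3)
old_forward = layer.forward
# a pre-hook (shift the input) and a post-hook (shift the output) in one wrapper
layer.forward = lambda x: old_forward(x + 1) + 1
_ = layer(torch.randn(2, 3))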
703
from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class __snake_case ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Dict , _UpperCamelCase : NestedDataStructureLike[PathLike] , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Optional[Features] = None , _UpperCamelCase : str = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : Optional[int] = None , **_UpperCamelCase : Tuple , ) ->Union[str, Any]: """simple docstring""" super().__init__( _UpperCamelCase , split=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase , streaming=_UpperCamelCase , num_proc=_UpperCamelCase , **_UpperCamelCase , ) _lowerCamelCase : List[Any] = path_or_paths if isinstance(_UpperCamelCase , _UpperCamelCase) else {self.split: path_or_paths} _lowerCamelCase : Any = Text( cache_dir=_UpperCamelCase , data_files=_UpperCamelCase , features=_UpperCamelCase , **_UpperCamelCase , ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]: """simple docstring""" if self.streaming: _lowerCamelCase : Tuple = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: _lowerCamelCase : List[Any] = None _lowerCamelCase : Any = None _lowerCamelCase : List[str] = None _lowerCamelCase : Dict = None self.builder.download_and_prepare( download_config=_UpperCamelCase , download_mode=_UpperCamelCase , verification_mode=_UpperCamelCase , base_path=_UpperCamelCase , num_proc=self.num_proc , ) _lowerCamelCase : Optional[int] = self.builder.as_dataset( split=self.split , verification_mode=_UpperCamelCase , in_memory=self.keep_in_memory) return dataset
15
0
def merge_sort(collection):
    """Pure Python implementation of the merge sort algorithm."""

    def merge(left, right) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
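# A quick sanity sketch for merge_sort above: it should agree with the
# built-in sort on random input.
if __name__ == "__main__":
    import random

    sample = [random.randint(-100, 100) for _ in range(25)]
    assert merge_sort(sample) == sorted(sample)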
704
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence):
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
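# A small worked example for the three-way partition above: a single pass,
# O(n) time and O(1) extra space.
if __name__ == "__main__":
    assert dutch_national_flag_sort([2, 0, 1, 0, 2, 1]) == [0, 0, 1, 1, 2, 2]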
15
0
from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, require_torch_neuroncore, ) from transformers.training_args import ParallelMode from transformers.utils import logging lowerCAmelCase : Tuple =logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class __snake_case ( __a ): '''simple docstring''' def __init__( self : Dict , _UpperCamelCase : int = 101) ->List[str]: """simple docstring""" _lowerCamelCase : Optional[Any] = length def __len__( self : Dict) ->Tuple: """simple docstring""" return self.length def __getitem__( self : Optional[Any] , _UpperCamelCase : Union[str, Any]) ->Union[str, Any]: """simple docstring""" return i class __snake_case : '''simple docstring''' def __call__( self : Tuple , _UpperCamelCase : Optional[int]) ->Dict: """simple docstring""" return {"input_ids": torch.tensor(a_), "labels": torch.tensor(a_)} class __snake_case ( nn.Module ): '''simple docstring''' def __init__( self : Union[str, Any]) ->Optional[int]: """simple docstring""" super().__init__() # Add some (unused) params otherwise DDP will complain. _lowerCamelCase : Optional[int] = nn.Linear(120 , 80) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : Optional[int]=None) ->str: """simple docstring""" if labels is not None: return torch.tensor(0.0 , device=input_ids.device), input_ids else: return input_ids class __snake_case ( __a ): '''simple docstring''' @require_torch_neuroncore def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" _lowerCamelCase : int = F"""--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n """.split() _lowerCamelCase : Union[str, Any] = self.get_auto_remove_tmp_dir() _lowerCamelCase : int = F"""--output_dir {output_dir}""".split() _lowerCamelCase : List[Any] = ["""torchrun"""] + distributed_args + args execute_subprocess_async(a_ , env=self.get_env()) # successful return here == success - any errors would have caused an error in the sub-call class __snake_case ( __a ): '''simple docstring''' @require_torch_multi_gpu def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: """simple docstring""" _lowerCamelCase : Optional[int] = F"""--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n """.split() _lowerCamelCase : Tuple = self.get_auto_remove_tmp_dir() _lowerCamelCase : Tuple = F"""--output_dir {output_dir}""".split() _lowerCamelCase : Optional[int] = ["""torchrun"""] + distributed_args + args execute_subprocess_async(a_ , env=self.get_env()) # successful return here == success - any errors would have caused an error in the sub-call if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py lowerCAmelCase : List[Any] =HfArgumentParser((TrainingArguments,)) lowerCAmelCase : int =parser.parse_args_into_dataclasses()[0] logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """ F"""distributed 
training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}""" ) # Essentially, what we want to verify in the distributed case is that we get all samples back, # in the right order. (this is crucial for prediction for instance) for dataset_length in [101, 40, 7]: lowerCAmelCase : Tuple =DummyDataset(dataset_length) def A__ ( __A ): '''simple docstring''' _lowerCamelCase : Tuple = list(range(len(snake_case__ ) ) ) _lowerCamelCase : Union[str, Any] = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential if not success and training_args.local_rank == 0: logger.warning( """Predictions and/or labels do not match expected results:\n - predictions: """ F"""{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}""" ) return {"success": success} lowerCAmelCase : Dict =Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) lowerCAmelCase : List[str] =trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) lowerCAmelCase : Optional[int] =trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) lowerCAmelCase : List[Any] =2 lowerCAmelCase : Optional[Any] =trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) lowerCAmelCase : str =trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) lowerCAmelCase : List[Any] =None
705
from __future__ import annotations

solution = []


def is_safe(board, row, column):
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board, row):
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board):
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
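# A compact standalone cross-check of the backtracking idea above: track the
# occupied columns and both diagonals, and count completed placements. For a
# 4x4 board the search should find exactly 2 solutions (92 for 8x8).
def count_n_queens(n: int, row: int = 0, cols=(), diag1=(), diag2=()) -> int:
    if row == n:
        return 1
    total = 0
    for c in range(n):
        if c not in cols and row + c not in diag1 and row - c not in diag2:
            total += count_n_queens(n, row + 1, cols + (c,), diag1 + (row + c,), diag2 + (row - c,))
    return total


assert count_n_queens(4) == 2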
15
0
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL lowerCAmelCase : Any =logging.get_logger(__name__) def A__ ( __A , __A , __A , __A ): '''simple docstring''' def constraint_to_multiple_of(__A , __A , __A=0 , __A=None ): _lowerCamelCase : List[str] = round(val / multiple ) * multiple if max_val is not None and x > max_val: _lowerCamelCase : Tuple = math.floor(val / multiple ) * multiple if x < min_val: _lowerCamelCase : Tuple = math.ceil(val / multiple ) * multiple return x _lowerCamelCase : List[str] = (output_size, output_size) if isinstance(__snake_case , __snake_case ) else output_size _lowerCamelCase , _lowerCamelCase : Any = get_image_size(__snake_case ) _lowerCamelCase , _lowerCamelCase : Tuple = output_size # determine new height and width _lowerCamelCase : List[Any] = output_height / input_height _lowerCamelCase : Optional[int] = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width _lowerCamelCase : List[str] = scale_width else: # fit height _lowerCamelCase : str = scale_height _lowerCamelCase : str = constraint_to_multiple_of(scale_height * input_height , multiple=__snake_case ) _lowerCamelCase : Union[str, Any] = constraint_to_multiple_of(scale_width * input_width , multiple=__snake_case ) return (new_height, new_width) class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = ['pixel_values'] def __init__( self : int , _UpperCamelCase : Dict = True , _UpperCamelCase : Dict = None , _UpperCamelCase : int = PILImageResampling.BILINEAR , _UpperCamelCase : Optional[int] = False , _UpperCamelCase : Dict = 1 , _UpperCamelCase : Optional[Any] = True , _UpperCamelCase : Union[str, Any] = 1 / 255 , _UpperCamelCase : List[Any] = True , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : List[Any] = None , **_UpperCamelCase : Union[str, Any] , ) ->Optional[Any]: """simple docstring""" super().__init__(**__lowerCAmelCase) _lowerCamelCase : Dict = size if size is not None else {"""height""": 384, """width""": 384} _lowerCamelCase : int = get_size_dict(__lowerCAmelCase) _lowerCamelCase : Union[str, Any] = do_resize _lowerCamelCase : Tuple = size _lowerCamelCase : Optional[Any] = keep_aspect_ratio _lowerCamelCase : List[Any] = ensure_multiple_of _lowerCamelCase : Tuple = resample _lowerCamelCase : List[str] = do_rescale _lowerCamelCase : str = rescale_factor _lowerCamelCase : Dict = do_normalize _lowerCamelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowerCamelCase : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : List[Any] = False , _UpperCamelCase : List[str] = 1 , _UpperCamelCase : str = PILImageResampling.BICUBIC , _UpperCamelCase : Union[str, Any] = None , **_UpperCamelCase : Optional[Any] , ) ->Dict: """simple 
docstring""" _lowerCamelCase : Dict = get_size_dict(__lowerCAmelCase) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}""") _lowerCamelCase : Optional[Any] = get_resize_output_image_size( __lowerCAmelCase , output_size=(size["""height"""], size["""width"""]) , keep_aspect_ratio=__lowerCAmelCase , multiple=__lowerCAmelCase , ) return resize(__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase) def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : Union[str, Any] = None , **_UpperCamelCase : Dict , ) ->Any: """simple docstring""" return rescale(__lowerCAmelCase , scale=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : Any , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : Dict = None , **_UpperCamelCase : Dict , ) ->Tuple: """simple docstring""" return normalize(__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase) def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : str = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[Any] = None , _UpperCamelCase : List[str] = None , _UpperCamelCase : Optional[Any] = None , _UpperCamelCase : List[str] = None , _UpperCamelCase : str = None , _UpperCamelCase : str = None , _UpperCamelCase : Optional[Any] = None , _UpperCamelCase : Optional[Any] = None , _UpperCamelCase : str = None , _UpperCamelCase : str = ChannelDimension.FIRST , **_UpperCamelCase : int , ) ->List[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize _lowerCamelCase : int = size if size is not None else self.size _lowerCamelCase : Tuple = get_size_dict(__lowerCAmelCase) _lowerCamelCase : List[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio _lowerCamelCase : int = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of _lowerCamelCase : List[str] = resample if resample is not None else self.resample _lowerCamelCase : Dict = do_rescale if do_rescale is not None else self.do_rescale _lowerCamelCase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCamelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize _lowerCamelCase : Tuple = image_mean if image_mean is not None else self.image_mean _lowerCamelCase : Union[str, Any] = image_std if image_std is not None else self.image_std _lowerCamelCase : Optional[Any] = make_list_of_images(__lowerCAmelCase) if not valid_images(__lowerCAmelCase): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""") if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""") if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""") if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""") # All transformations expect numpy arrays. 
_lowerCamelCase : List[str] = [to_numpy_array(__lowerCAmelCase) for image in images] if do_resize: _lowerCamelCase : Any = [self.resize(image=__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase) for image in images] if do_rescale: _lowerCamelCase : str = [self.rescale(image=__lowerCAmelCase , scale=__lowerCAmelCase) for image in images] if do_normalize: _lowerCamelCase : Optional[Any] = [self.normalize(image=__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase) for image in images] _lowerCamelCase : Dict = [to_channel_dimension_format(__lowerCAmelCase , __lowerCAmelCase) for image in images] _lowerCamelCase : Union[str, Any] = {"""pixel_values""": images} return BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase) def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[Any] = None) ->Optional[int]: """simple docstring""" _lowerCamelCase : Optional[Any] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(__lowerCAmelCase) != len(__lowerCAmelCase): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""") if is_torch_tensor(__lowerCAmelCase): _lowerCamelCase : int = target_sizes.numpy() _lowerCamelCase : Union[str, Any] = [] for idx in range(len(__lowerCAmelCase)): _lowerCamelCase : List[str] = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=__lowerCAmelCase) _lowerCamelCase : List[str] = resized_logits[0].argmax(dim=0) semantic_segmentation.append(__lowerCAmelCase) else: _lowerCamelCase : Dict = logits.argmax(dim=1) _lowerCamelCase : Tuple = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation
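# A standalone sketch of the resizing rule implemented above: when keeping the
# aspect ratio, pick the scale closest to 1, then snap each side to a multiple
# (here 32), as get_resize_output_image_size does (min/max clamps omitted).
def snap_to_multiple(val: float, multiple: int) -> int:
    return round(val / multiple) * multiple


in_h, in_w, target = 480, 640, 384
scale_h, scale_w = target / in_h, target / in_w
scale = scale_w if abs(1 - scale_w) < abs(1 - scale_h) else scale_h
print(snap_to_multiple(scale * in_h, 32), snap_to_multiple(scale * in_w, 32))  # 384 512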
706
import argparse import os import torch from transformers import ( XLNetConfig, XLNetForQuestionAnswering, XLNetForSequenceClassification, XLNetLMHeadModel, load_tf_weights_in_xlnet, ) from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging lowerCAmelCase : int ={ "cola": 2, "mnli": 3, "mrpc": 2, "sst-2": 2, "sts-b": 1, "qqp": 2, "qnli": 2, "rte": 2, "wnli": 2, } logging.set_verbosity_info() def A__ ( __A , __A , __A , __A=None ): '''simple docstring''' # Initialise PyTorch model _lowerCamelCase : Tuple = XLNetConfig.from_json_file(__A ) _lowerCamelCase : List[Any] = finetuning_task.lower() if finetuning_task is not None else """""" if finetuning_task in GLUE_TASKS_NUM_LABELS: print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" ) _lowerCamelCase : int = finetuning_task _lowerCamelCase : Union[str, Any] = GLUE_TASKS_NUM_LABELS[finetuning_task] _lowerCamelCase : int = XLNetForSequenceClassification(__A ) elif "squad" in finetuning_task: _lowerCamelCase : Dict = finetuning_task _lowerCamelCase : Optional[Any] = XLNetForQuestionAnswering(__A ) else: _lowerCamelCase : Any = XLNetLMHeadModel(__A ) # Load weights from tf checkpoint load_tf_weights_in_xlnet(__A , __A , __A ) # Save pytorch-model _lowerCamelCase : Optional[Any] = os.path.join(__A , __A ) _lowerCamelCase : Any = os.path.join(__A , __A ) print(F"""Save PyTorch model to {os.path.abspath(__A )}""" ) torch.save(model.state_dict() , __A ) print(F"""Save configuration file to {os.path.abspath(__A )}""" ) with open(__A , """w""" , encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": lowerCAmelCase : Dict =argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--xlnet_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained XLNet model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the folder to store the PyTorch model or dataset/vocab.", ) parser.add_argument( "--finetuning_task", default=None, type=str, help="Name of a task on which the XLNet TensorFlow model was fine-tuned", ) lowerCAmelCase : Union[str, Any] =parser.parse_args() print(args) convert_xlnet_checkpoint_to_pytorch( args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task )
15
0
def longest_common_substring(text1: str, text2: str) -> str:
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
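# A short usage sketch for the DP function above; the table costs
# O(len(text1) * len(text2)) time and space.
if __name__ == "__main__":
    assert longest_common_substring("abcdxyz", "xyzabcd") == "abcd"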
707
def is_contains_unique_chars(input_str: str) -> bool:
    """Check if all characters in a string are unique, using a bitmap over code points."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
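# Two quick checks for the bitmap test above; the integer bitmap grows with
# the largest code point seen, trading memory for a set-free membership test.
if __name__ == "__main__":
    assert is_contains_unique_chars("ABCDEF") is True
    assert is_contains_unique_chars("ABCABC") is False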
15
0
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
708
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class __snake_case ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Tuple) ->int: """simple docstring""" _lowerCamelCase : Tuple = XLMRobertaModel.from_pretrained("""xlm-roberta-base""") _lowerCamelCase : Optional[int] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]]) # The dog is cute and lives in the garden house _lowerCamelCase : Optional[Any] = torch.Size((1, 12, 768)) # batch_size, sequence_length, embedding_vector_dim _lowerCamelCase : str = torch.tensor( [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]]) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): _lowerCamelCase : List[str] = model(_UpperCamelCase)["""last_hidden_state"""].detach() self.assertEqual(output.shape , _UpperCamelCase) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1E-3)) @slow def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : List[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""") _lowerCamelCase : Optional[Any] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]]) # The dog is cute and lives in the garden house _lowerCamelCase : str = torch.Size((1, 12, 1024)) # batch_size, sequence_length, embedding_vector_dim _lowerCamelCase : Union[str, Any] = torch.tensor( [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]]) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): _lowerCamelCase : int = model(_UpperCamelCase)["""last_hidden_state"""].detach() self.assertEqual(output.shape , _UpperCamelCase) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1E-3))
15
0
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
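# Hypothetical invocation sketch (the script name and paths below are
# placeholders, not real files):
# python convert_gpt2_checkpoint.py \
#     --gpt2_checkpoint_path /path/to/model.ckpt \
#     --pytorch_dump_folder_path /path/to/out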
709
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase : Tuple =logging.get_logger(__name__) class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = ['pixel_values'] def __init__( self : Optional[Any] , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Union[int, float] = 1 / 255 , _UpperCamelCase : bool = True , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , **_UpperCamelCase : str , ) ->None: """simple docstring""" super().__init__(**_UpperCamelCase) _lowerCamelCase : Tuple = size if size is not None else {"""height""": 256, """width""": 256} _lowerCamelCase : Optional[Any] = get_size_dict(_UpperCamelCase) _lowerCamelCase : Any = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} _lowerCamelCase : Any = get_size_dict(_UpperCamelCase , param_name="""crop_size""") _lowerCamelCase : int = do_resize _lowerCamelCase : int = size _lowerCamelCase : Optional[int] = resample _lowerCamelCase : int = do_center_crop _lowerCamelCase : Optional[Any] = crop_size _lowerCamelCase : Union[str, Any] = do_rescale _lowerCamelCase : List[str] = rescale_factor _lowerCamelCase : List[Any] = do_normalize _lowerCamelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray: """simple docstring""" _lowerCamelCase : Dict = get_size_dict(_UpperCamelCase) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""") return resize( _UpperCamelCase , size=(size["""height"""], size["""width"""]) , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : List[str] , ) ->np.ndarray: """simple docstring""" _lowerCamelCase : Union[str, Any] = get_size_dict(_UpperCamelCase) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. 
Got {size.keys()}""") return center_crop(_UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[int, float] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->str: """simple docstring""" return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray: """simple docstring""" return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : ImageInput , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Tuple=None , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : bool = None , _UpperCamelCase : float = None , _UpperCamelCase : bool = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , _UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCamelCase : List[Any] , ) ->PIL.Image.Image: """simple docstring""" _lowerCamelCase : Any = do_resize if do_resize is not None else self.do_resize _lowerCamelCase : List[str] = resample if resample is not None else self.resample _lowerCamelCase : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop _lowerCamelCase : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale _lowerCamelCase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCamelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize _lowerCamelCase : int = image_mean if image_mean is not None else self.image_mean _lowerCamelCase : Dict = image_std if image_std is not None else self.image_std _lowerCamelCase : Optional[Any] = size if size is not None else self.size _lowerCamelCase : Optional[int] = get_size_dict(_UpperCamelCase) _lowerCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size _lowerCamelCase : Dict = get_size_dict(_UpperCamelCase , param_name="""crop_size""") _lowerCamelCase : int = make_list_of_images(_UpperCamelCase) if not valid_images(_UpperCamelCase): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""") if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""") if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""") if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""") if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""") # All transformations expect numpy arrays. 
_lowerCamelCase : Union[str, Any] = [to_numpy_array(_UpperCamelCase) for image in images] if do_resize: _lowerCamelCase : Any = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase) for image in images] if do_center_crop: _lowerCamelCase : str = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase) for image in images] if do_rescale: _lowerCamelCase : Optional[int] = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase) for image in images] if do_normalize: _lowerCamelCase : List[str] = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase) for image in images] _lowerCamelCase : List[str] = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase) for image in images] _lowerCamelCase : int = {"""pixel_values""": images} return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase)
15
0
import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params lowerCAmelCase : Tuple =getLogger(__name__) lowerCAmelCase : List[Any] ="cuda" if torch.cuda.is_available() else "cpu" def A__ ( __A , __A , __A , __A = 8 , __A = DEFAULT_DEVICE , __A=False , __A="summarization" , __A=None , **__A , ): '''simple docstring''' _lowerCamelCase : List[Any] = Path(_snake_case ).open("""w""" , encoding="""utf-8""" ) _lowerCamelCase : Optional[Any] = str(_snake_case ) _lowerCamelCase : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ).to(_snake_case ) if fpaa: _lowerCamelCase : Optional[int] = model.half() _lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(_snake_case ) logger.info(F"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type. _lowerCamelCase : str = time.time() # update config with task specific params use_task_specific_params(_snake_case , _snake_case ) if prefix is None: _lowerCamelCase : int = prefix or getattr(model.config , """prefix""" , """""" ) or """""" for examples_chunk in tqdm(list(chunks(_snake_case , _snake_case ) ) ): _lowerCamelCase : str = [prefix + text for text in examples_chunk] _lowerCamelCase : List[Any] = tokenizer(_snake_case , return_tensors="""pt""" , truncation=_snake_case , padding="""longest""" ).to(_snake_case ) _lowerCamelCase : Union[str, Any] = model.generate( input_ids=batch.input_ids , attention_mask=batch.attention_mask , **_snake_case , ) _lowerCamelCase : Dict = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case , clean_up_tokenization_spaces=_snake_case ) for hypothesis in dec: fout.write(hypothesis + """\n""" ) fout.flush() fout.close() _lowerCamelCase : Optional[int] = int(time.time() - start_time ) # seconds _lowerCamelCase : str = len(_snake_case ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )} def A__ ( ): '''simple docstring''' return datetime.datetime.now().strftime("""%Y-%m-%d %H:%M:%S""" ) def A__ ( __A=True ): '''simple docstring''' _lowerCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument("""model_name""" , type=_snake_case , help="""like facebook/bart-large-cnn,t5-base, etc.""" ) parser.add_argument("""input_path""" , type=_snake_case , help="""like cnn_dm/test.source""" ) parser.add_argument("""save_path""" , type=_snake_case , help="""where to save summaries""" ) parser.add_argument("""--reference_path""" , type=_snake_case , required=_snake_case , help="""like cnn_dm/test.target""" ) parser.add_argument("""--score_path""" , type=_snake_case , required=_snake_case , default="""metrics.json""" , help="""where to save metrics""" ) parser.add_argument("""--device""" , type=_snake_case , required=_snake_case , default=_snake_case , help="""cuda, cuda:1, cpu etc.""" ) parser.add_argument( """--prefix""" , type=_snake_case , required=_snake_case , default=_snake_case , help="""will be added to the begininng of src examples""" ) parser.add_argument("""--task""" , type=_snake_case , default="""summarization""" , help="""used for task_specific_params + metrics""" ) parser.add_argument("""--bs""" , type=_snake_case , default=8 , required=_snake_case , help="""batch size""" ) 
parser.add_argument( """--n_obs""" , type=_snake_case , default=-1 , required=_snake_case , help="""How many observations. Defaults to all.""" ) parser.add_argument("""--fp16""" , action="""store_true""" ) parser.add_argument("""--dump-args""" , action="""store_true""" , help="""print the custom hparams with the results""" ) parser.add_argument( """--info""" , nargs="""?""" , type=_snake_case , const=datetime_now() , help=( """use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.""" """ lang=en-ru. If no value is passed, the current datetime string will be used.""" ) , ) # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate _lowerCamelCase , _lowerCamelCase : Optional[int] = parser.parse_known_args() _lowerCamelCase : int = parse_numeric_n_bool_cl_kwargs(_snake_case ) if parsed_args and verbose: print(F"""parsed the following generate kwargs: {parsed_args}""" ) _lowerCamelCase : Any = [""" """ + x.rstrip() if """t5""" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()] if args.n_obs > 0: _lowerCamelCase : int = examples[: args.n_obs] Path(args.save_path ).parent.mkdir(exist_ok=_snake_case ) if args.reference_path is None and Path(args.score_path ).exists(): warnings.warn(F"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" ) if args.device == "cpu" and args.fpaa: # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half' raise ValueError("""Can\'t mix --fp16 and --device cpu""" ) _lowerCamelCase : Any = generate_summaries_or_translations( _snake_case , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **_snake_case , ) if args.reference_path is None: return {} # Compute scores _lowerCamelCase : int = calculate_bleu if """translation""" in args.task else calculate_rouge _lowerCamelCase : Any = [x.rstrip() for x in open(args.save_path ).readlines()] _lowerCamelCase : Optional[int] = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_snake_case )] _lowerCamelCase : int = score_fn(_snake_case , _snake_case ) scores.update(_snake_case ) if args.dump_args: scores.update(_snake_case ) if args.info: _lowerCamelCase : List[Any] = args.info if verbose: print(_snake_case ) if args.score_path is not None: json.dump(_snake_case , open(args.score_path , """w""" ) ) return scores if __name__ == "__main__": # Usage for MT: # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ run_generate(verbose=True)
710
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n]"""
        return 0.0


def get_bounds(fft_results, samplerate):
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
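# A minimal usage sketch: anything with a `process(sample) -> float` method
# satisfies the FilterType protocol above, e.g. this identity (all-pass)
# filter, whose magnitude response is a flat line at 0 dB.
class IdentityFilter:
    def process(self, sample: float) -> float:
        return sample


# show_frequency_response(IdentityFilter(), 48000)  # requires a display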
15
0
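# A minimal sketch exercising the response-plotting helpers in the file above
# (assumes the corrected names FilterType / show_frequency_response /
# show_phase_response; the AllPassFilter class and the 48 kHz samplerate are
# illustrative assumptions, not part of the original).
class AllPassFilter:
    """Identity filter: y[n] = x[n], so the gain plot should sit flat at 0 dB."""

    def process(self, sample: float) -> float:
        return sample


# AllPassFilter satisfies the FilterType protocol structurally, without
# inheriting from it. Both calls open a matplotlib window.
show_frequency_response(AllPassFilter(), 48000)
show_phase_response(AllPassFilter(), 48000)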
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
711
import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL lowerCAmelCase : Tuple =version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11") def A__ ( __A , __A , __A , __A , __A , __A , __A , __A=False , ): '''simple docstring''' output_path.parent.mkdir(parents=__A , exist_ok=__A ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( __A , __A , f=output_path.as_posix() , input_names=__A , output_names=__A , dynamic_axes=__A , do_constant_folding=__A , use_external_data_format=__A , enable_onnx_checker=__A , opset_version=__A , ) else: export( __A , __A , f=output_path.as_posix() , input_names=__A , output_names=__A , dynamic_axes=__A , do_constant_folding=__A , opset_version=__A , ) @torch.no_grad() def A__ ( __A , __A , __A , __A = False ): '''simple docstring''' _lowerCamelCase : Tuple = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): _lowerCamelCase : str = """cuda""" elif fpaa and not torch.cuda.is_available(): raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" ) else: _lowerCamelCase : List[str] = """cpu""" _lowerCamelCase : Dict = Path(__A ) # VAE DECODER _lowerCamelCase : Optional[Any] = AutoencoderKL.from_pretrained(model_path + """/vae""" ) _lowerCamelCase : List[str] = vae_decoder.config.latent_channels # forward only through the decoder part _lowerCamelCase : Tuple = vae_decoder.decode onnx_export( __A , model_args=( torch.randn(1 , __A , 25 , 25 ).to(device=__A , dtype=__A ), False, ) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={ """latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""}, } , opset=__A , ) del vae_decoder if __name__ == "__main__": lowerCAmelCase : Optional[int] =argparse.ArgumentParser() parser.add_argument( "--model_path", type=str, required=True, help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", ) parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") parser.add_argument( "--opset", default=14, type=int, help="The version of the ONNX operator set to use.", ) parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") lowerCAmelCase : Optional[Any] =parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print("SD: Done: ONNX")
15
0
import random


def rabin_miller(num: int) -> bool:
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41,
        43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101,
        103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167,
        173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239,
        241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313,
        317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397,
        401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467,
        479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
        571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643,
        647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733,
        739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823,
        827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911,
        919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
712
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError("Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError("Acceptor concentration should be greater than intrinsic concentration")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
15
0
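# A quick worked example for the builtin_voltage helper above. The doping and
# intrinsic concentrations below are illustrative order-of-magnitude values
# for silicon at room temperature, not taken from the original file.
v_bi = builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10)
print(f"Built-in voltage: {v_bi:.3f} V")  # approximately 0.81 V for these inputs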
def palindromic_string(input_string: str) -> str:
    """Manacher's algorithm: returns the longest palindromic substring in O(n)."""
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
713
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def A__ ( __A ): '''simple docstring''' _lowerCamelCase : Tuple = {} _lowerCamelCase : List[Any] = tokenizer(example["""content"""] , truncation=__A )["""input_ids"""] _lowerCamelCase : Tuple = len(example["""content"""] ) / len(output["""input_ids"""] ) return output lowerCAmelCase : int =HfArgumentParser(PretokenizationArguments) lowerCAmelCase : int =parser.parse_args() if args.num_workers is None: lowerCAmelCase : Any =multiprocessing.cpu_count() lowerCAmelCase : Optional[Any] =AutoTokenizer.from_pretrained(args.tokenizer_dir) lowerCAmelCase : str =time.time() lowerCAmelCase : Union[str, Any] =load_dataset(args.dataset_name, split="train") print(F"""Dataset loaded in {time.time()-t_start:.2f}s""") lowerCAmelCase : Dict =time.time() lowerCAmelCase : Dict =ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ "repo_name", "path", "copies", "size", "content", "license", "hash", "line_mean", "line_max", "alpha_frac", "autogenerated", ], ) print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""") lowerCAmelCase : Tuple =time.time() ds.push_to_hub(args.tokenized_data_repo) print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
15
0
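# Usage sketch for the Manacher implementation above (assumes the corrected
# palindromic_string definition; the test strings are illustrative).
assert palindromic_string("abbbaba") == "abbba"
assert palindromic_string("ababa") == "ababa"
print(palindromic_string("forgeeksskeegfor"))  # geeksskeeg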
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
714
import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _snake_case = IFPipeline _snake_case = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'} _snake_case = TEXT_TO_IMAGE_BATCH_PARAMS _snake_case = PipelineTesterMixin.required_optional_params - {'latents'} def _SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]: """simple docstring""" return self._get_dummy_components() def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=0) ->Optional[Any]: """simple docstring""" if str(_UpperCamelCase).startswith("""mps"""): _lowerCamelCase : int = torch.manual_seed(_UpperCamelCase) else: _lowerCamelCase : List[Any] = torch.Generator(device=_UpperCamelCase).manual_seed(_UpperCamelCase) _lowerCamelCase : Dict = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""") def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1) def _SCREAMING_SNAKE_CASE ( self : int) ->Any: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2) def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]: """simple docstring""" self._test_save_load_local() def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3) @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]: """simple docstring""" _lowerCamelCase : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa) _lowerCamelCase : Tuple = IFSuperResolutionPipeline.from_pretrained( """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("""cuda""") _lowerCamelCase , _lowerCamelCase : str = 
pipe_a.encode_prompt("""anime turtle""" , device="""cuda""") del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() _lowerCamelCase : str = None _lowerCamelCase : str = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img _lowerCamelCase : Optional[Any] = IFImgaImgPipeline(**pipe_a.components) _lowerCamelCase : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if_imgaimg(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting _lowerCamelCase : Any = IFInpaintingPipeline(**pipe_a.components) _lowerCamelCase : Dict = IFInpaintingSuperResolutionPipeline(**pipe_a.components) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if_inpainting(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str) ->Tuple: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : Optional[int] = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Optional[Any] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Optional[int] = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 _lowerCamelCase : Dict = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : str = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : Any = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : str , _UpperCamelCase : List[Any]) ->Any: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Union[str, Any] = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Dict 
= pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Union[str, Any] = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _lowerCamelCase : List[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : List[str] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Optional[Any] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : List[Any] = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : str = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple) ->Optional[int]: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(1)).to(_UpperCamelCase) _lowerCamelCase : int = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Any = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Any = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _lowerCamelCase : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1)).to(_UpperCamelCase) _lowerCamelCase : List[str] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : Optional[Any] = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : Optional[Any] = 
torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def A__ ( ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
15
0
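# Worked example for the subset-sum backtracking above (assumes the corrected
# generate_sum_of_subsets_soln name). For [3, 34, 4, 12, 5, 2] and a target of
# 9, the only solutions are [3, 4, 2] and [4, 5].
subsets = generate_sum_of_subsets_soln(nums=[3, 34, 4, 12, 5, 2], max_sum=9)
assert sorted(sorted(subset) for subset in subsets) == [[2, 3, 4], [4, 5]]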
from __future__ import annotations

from typing import Any


class Graph:
    """Borůvka's algorithm for finding a minimum spanning tree."""

    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge in the format [first, second, edge weight] to the graph."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Returns the root of the component that u_node belongs to."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Propagates the new component root throughout the component."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Attaches the smaller component to the larger one to form a single component."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Performs Borůvka's algorithm to find the MST."""
        component_size = []
        mst_weight = 0

        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge

                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
715
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase : Any =logging.get_logger(__name__) lowerCAmelCase : List[Any] ={ "microsoft/swin-tiny-patch4-window7-224": ( "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json" ), # See all Swin models at https://huggingface.co/models?filter=swin } class __snake_case ( __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' _snake_case = 'swin' _snake_case = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : Optional[int] , _UpperCamelCase : List[str]=224 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : List[Any]=3 , _UpperCamelCase : Dict=96 , _UpperCamelCase : Any=[2, 2, 6, 2] , _UpperCamelCase : Any=[3, 6, 12, 24] , _UpperCamelCase : Tuple=7 , _UpperCamelCase : Tuple=4.0 , _UpperCamelCase : Dict=True , _UpperCamelCase : Tuple=0.0 , _UpperCamelCase : Any=0.0 , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : Any="gelu" , _UpperCamelCase : str=False , _UpperCamelCase : str=0.0_2 , _UpperCamelCase : Dict=1E-5 , _UpperCamelCase : List[str]=32 , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : List[Any]=None , **_UpperCamelCase : List[Any] , ) ->Tuple: """simple docstring""" super().__init__(**_UpperCamelCase) _lowerCamelCase : List[str] = image_size _lowerCamelCase : Tuple = patch_size _lowerCamelCase : Dict = num_channels _lowerCamelCase : Union[str, Any] = embed_dim _lowerCamelCase : str = depths _lowerCamelCase : str = len(_UpperCamelCase) _lowerCamelCase : Optional[Any] = num_heads _lowerCamelCase : Tuple = window_size _lowerCamelCase : int = mlp_ratio _lowerCamelCase : Optional[int] = qkv_bias _lowerCamelCase : List[str] = hidden_dropout_prob _lowerCamelCase : str = attention_probs_dropout_prob _lowerCamelCase : Tuple = drop_path_rate _lowerCamelCase : List[str] = hidden_act _lowerCamelCase : Dict = use_absolute_embeddings _lowerCamelCase : int = layer_norm_eps _lowerCamelCase : str = initializer_range _lowerCamelCase : Dict = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowerCamelCase : int = int(embed_dim * 2 ** (len(_UpperCamelCase) - 1)) _lowerCamelCase : Dict = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(_UpperCamelCase) + 1)] _lowerCamelCase , _lowerCamelCase : List[str] = get_aligned_output_features_output_indices( out_features=_UpperCamelCase , out_indices=_UpperCamelCase , stage_names=self.stage_names) class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = version.parse('1.11' ) @property def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ]) @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->float: """simple docstring""" return 1E-4
15
0
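# Small driver for the Borůvka implementation above (assumes the reconstructed
# Graph.add_edge / Graph.boruvka API; the 8-node example graph is an
# illustrative assumption).
g = Graph(8)
for u, v, w in [
    (0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4),
    (3, 4, 8), (4, 5, 10), (4, 6, 6), (4, 7, 5), (5, 7, 15), (6, 7, 4),
]:
    g.add_edge(u, v, w)
g.boruvka()  # prints each edge as it is added, then the total MST weight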
import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) lowerCAmelCase : Optional[Any] =logging.getLogger(__name__) lowerCAmelCase : List[Any] ='''Hello world! cécé herlolip''' lowerCAmelCase : Tuple =namedtuple( "BertAbsConfig", [ "temp_dir", "large", "use_bert_emb", "finetune_bert", "encoder", "share_emb", "max_pos", "enc_layers", "enc_hidden_size", "enc_heads", "enc_ff_size", "enc_dropout", "dec_layers", "dec_hidden_size", "dec_heads", "dec_ff_size", "dec_dropout", ], ) def A__ ( __A , __A ): '''simple docstring''' _lowerCamelCase : List[Any] = BertAbsConfig( temp_dir=""".""" , finetune_bert=__A , large=__A , share_emb=__A , use_bert_emb=__A , encoder="""bert""" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2_048 , dec_dropout=0.2 , ) _lowerCamelCase : Dict = torch.load(__A , lambda __A , __A : storage ) _lowerCamelCase : List[Any] = AbsSummarizer(__A , torch.device("""cpu""" ) , __A ) original.eval() _lowerCamelCase : List[Any] = BertAbsSummarizer(__A , torch.device("""cpu""" ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info("""convert the model""" ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info("""Make sure that the models' outputs are identical""" ) _lowerCamelCase : Optional[int] = BertTokenizer.from_pretrained("""bert-base-uncased""" ) # prepare the model inputs _lowerCamelCase : Tuple = tokenizer.encode("""This is sample éàalj'-.""" ) encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__A )) ) _lowerCamelCase : Optional[int] = torch.tensor(__A ).unsqueeze(0 ) _lowerCamelCase : Dict = tokenizer.encode("""This is sample 3 éàalj'-.""" ) decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__A )) ) _lowerCamelCase : Dict = torch.tensor(__A ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass _lowerCamelCase : Optional[int] = encoder_input_ids _lowerCamelCase : str = decoder_input_ids _lowerCamelCase : str = None _lowerCamelCase : Optional[int] = None _lowerCamelCase : Tuple = None _lowerCamelCase : str = None _lowerCamelCase : Optional[int] = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. 
# We make sure that the outputs of the full stack are identical _lowerCamelCase : List[Any] = original(__A , __A , __A , __A , __A , __A , __A )[0] _lowerCamelCase : Dict = original.generator(__A ) _lowerCamelCase : List[str] = new_model( __A , __A , __A , __A , __A )[0] _lowerCamelCase : List[str] = new_model.generator(__A ) _lowerCamelCase : List[str] = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print("""Maximum absolute difference beween weights: {:.2f}""".format(__A ) ) _lowerCamelCase : List[str] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print("""Maximum absolute difference beween weights: {:.2f}""".format(__A ) ) _lowerCamelCase : List[str] = torch.allclose(__A , __A , atol=1E-3 ) if are_identical: logging.info("""all weights are equal up to 1e-3""" ) else: raise ValueError("""the weights are different. The new model is likely different from the original one.""" ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info("""saving the model's state dictionary""" ) torch.save( new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" ) if __name__ == "__main__": lowerCAmelCase : Union[str, Any] =argparse.ArgumentParser() parser.add_argument( "--bertabs_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model.", ) lowerCAmelCase : Optional[int] =parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
716
import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = (EulerDiscreteScheduler,) _snake_case = 10 def _SCREAMING_SNAKE_CASE ( self : Tuple , **_UpperCamelCase : Optional[Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Optional[int] = { """num_train_timesteps""": 1100, """beta_start""": 0.0_0_0_1, """beta_end""": 0.0_2, """beta_schedule""": """linear""", } config.update(**_UpperCamelCase) return config def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict: """simple docstring""" for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]): self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : List[Any] = self.scheduler_classes[0] _lowerCamelCase : str = self.get_scheduler_config() _lowerCamelCase : Any = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps) _lowerCamelCase : str = torch.manual_seed(0) _lowerCamelCase : str = self.dummy_model() _lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma _lowerCamelCase : int = sample.to(_UpperCamelCase) for i, t in enumerate(scheduler.timesteps): _lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : str = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : Dict = output.prev_sample _lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : Any = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2 assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any: """simple docstring""" _lowerCamelCase : int = self.scheduler_classes[0] _lowerCamelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""") _lowerCamelCase : int = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps) _lowerCamelCase : Any = torch.manual_seed(0) _lowerCamelCase : int = self.dummy_model() _lowerCamelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma _lowerCamelCase : Dict = sample.to(_UpperCamelCase) for i, t in enumerate(scheduler.timesteps): _lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : str = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[Any] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : Tuple = output.prev_sample _lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase 
: Optional[int] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 0.0_0_0_2) < 1E-2 assert abs(result_mean.item() - 2.2_6_7_6E-0_6) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.scheduler_classes[0] _lowerCamelCase : int = self.get_scheduler_config() _lowerCamelCase : List[Any] = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase) _lowerCamelCase : Optional[Any] = torch.manual_seed(0) _lowerCamelCase : Tuple = self.dummy_model() _lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _lowerCamelCase : Tuple = sample.to(_UpperCamelCase) for t in scheduler.timesteps: _lowerCamelCase : List[Any] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Any = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : List[Any] = output.prev_sample _lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : List[Any] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2 assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple: """simple docstring""" _lowerCamelCase : List[str] = self.scheduler_classes[0] _lowerCamelCase : Optional[int] = self.get_scheduler_config() _lowerCamelCase : int = scheduler_class(**_UpperCamelCase , use_karras_sigmas=_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase) _lowerCamelCase : int = torch.manual_seed(0) _lowerCamelCase : Tuple = self.dummy_model() _lowerCamelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _lowerCamelCase : Optional[int] = sample.to(_UpperCamelCase) for t in scheduler.timesteps: _lowerCamelCase : Tuple = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Any = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : int = output.prev_sample _lowerCamelCase : Tuple = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : List[str] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9) < 1E-2 assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3) < 1E-3
15
0
import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging lowerCAmelCase : Optional[Any] =logging.get_logger(__name__) class __snake_case ( UpperCAmelCase__ ): '''simple docstring''' _snake_case = ["input_features", "is_longer"] def __init__( self : List[str] , _UpperCamelCase : Optional[Any]=64 , _UpperCamelCase : int=4_8000 , _UpperCamelCase : str=480 , _UpperCamelCase : str=10 , _UpperCamelCase : List[str]=1024 , _UpperCamelCase : Any=0.0 , _UpperCamelCase : int=False , _UpperCamelCase : float = 0 , _UpperCamelCase : float = 1_4000 , _UpperCamelCase : int = None , _UpperCamelCase : str = "fusion" , _UpperCamelCase : str = "repeatpad" , **_UpperCamelCase : Optional[Any] , ) ->str: """simple docstring""" super().__init__( feature_size=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , padding_value=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , **lowerCamelCase__ , ) _lowerCamelCase : int = top_db _lowerCamelCase : Any = truncation _lowerCamelCase : Optional[Any] = padding _lowerCamelCase : int = fft_window_size _lowerCamelCase : Optional[int] = (fft_window_size >> 1) + 1 _lowerCamelCase : Optional[int] = hop_length _lowerCamelCase : Union[str, Any] = max_length_s _lowerCamelCase : Union[str, Any] = max_length_s * sampling_rate _lowerCamelCase : List[str] = sampling_rate _lowerCamelCase : Optional[int] = frequency_min _lowerCamelCase : List[Any] = frequency_max _lowerCamelCase : Union[str, Any] = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase__ , min_frequency=lowerCamelCase__ , max_frequency=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , norm=lowerCamelCase__ , mel_scale="""htk""" , ) _lowerCamelCase : Tuple = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase__ , min_frequency=lowerCamelCase__ , max_frequency=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , norm="""slaney""" , mel_scale="""slaney""" , ) def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict[str, Any]: """simple docstring""" _lowerCamelCase : List[Any] = copy.deepcopy(self.__dict__) _lowerCamelCase : Optional[Any] = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : np.array , _UpperCamelCase : Optional[np.array] = None) ->np.ndarray: """simple docstring""" _lowerCamelCase : int = spectrogram( lowerCamelCase__ , window_function(self.fft_window_size , """hann""") , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=lowerCamelCase__ , log_mel="""dB""" , ) return log_mel_spectrogram.T def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : List[Any] , _UpperCamelCase : Any , _UpperCamelCase : str) ->Optional[int]: """simple docstring""" _lowerCamelCase : List[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1)) , 3) if len(ranges[1]) == 0: # if the audio is too short, we just use the first chunk _lowerCamelCase : int = [0] if len(ranges[2]) == 0: # if the audio is too short, we just use the first chunk _lowerCamelCase : List[Any] = [0] # randomly choose index for each part _lowerCamelCase : List[Any] = 
np.random.choice(ranges[0]) _lowerCamelCase : List[str] = np.random.choice(ranges[1]) _lowerCamelCase : List[str] = np.random.choice(ranges[2]) _lowerCamelCase : Dict = mel[idx_front : idx_front + chunk_frames, :] _lowerCamelCase : int = mel[idx_middle : idx_middle + chunk_frames, :] _lowerCamelCase : int = mel[idx_back : idx_back + chunk_frames, :] _lowerCamelCase : Dict = torch.tensor(mel[None, None, :]) _lowerCamelCase : Any = torch.nn.functional.interpolate( lowerCamelCase__ , size=[chunk_frames, 64] , mode="""bilinear""" , align_corners=lowerCamelCase__) _lowerCamelCase : List[str] = mel_shrink[0][0].numpy() _lowerCamelCase : List[str] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0) return mel_fusion def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : np.array , _UpperCamelCase : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : str) ->np.array: """simple docstring""" if waveform.shape[0] > max_length: if truncation == "rand_trunc": _lowerCamelCase : Optional[int] = True # random crop to max_length (for compatibility) -> this should be handled by self.pad _lowerCamelCase : Optional[Any] = len(lowerCamelCase__) - max_length _lowerCamelCase : List[Any] = np.random.randint(0 , overflow + 1) _lowerCamelCase : str = waveform[idx : idx + max_length] _lowerCamelCase : Dict = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters_slaney)[None, :] elif truncation == "fusion": _lowerCamelCase : List[str] = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters) _lowerCamelCase : List[Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed _lowerCamelCase : str = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. _lowerCamelCase : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0) _lowerCamelCase : List[str] = False else: _lowerCamelCase : Optional[Any] = self._random_mel_fusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__) _lowerCamelCase : Union[str, Any] = True else: raise NotImplementedError(F"""data_truncating {truncation} not implemented""") else: _lowerCamelCase : Union[str, Any] = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": _lowerCamelCase : Optional[int] = int(max_length / len(lowerCamelCase__)) _lowerCamelCase : int = np.stack(np.tile(lowerCamelCase__ , n_repeat + 1))[:max_length] if padding == "repeatpad": _lowerCamelCase : Optional[Any] = int(max_length / len(lowerCamelCase__)) _lowerCamelCase : str = np.stack(np.tile(lowerCamelCase__ , lowerCamelCase__)) _lowerCamelCase : Any = np.pad(lowerCamelCase__ , (0, max_length - waveform.shape[0]) , mode="""constant""" , constant_values=0) if truncation == "fusion": _lowerCamelCase : List[Any] = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters) _lowerCamelCase : Tuple = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0) else: _lowerCamelCase : List[str] = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters_slaney)[None, :] return input_mel, longer def __call__( self : str , _UpperCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _UpperCamelCase : str = None , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , **_UpperCamelCase : str , ) ->BatchFeature: """simple docstring""" _lowerCamelCase : Any = truncation if truncation is not None else self.truncation _lowerCamelCase : Any = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a""" F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input""" F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""") else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""") _lowerCamelCase : Tuple = isinstance(lowerCamelCase__ , np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(F"""Only mono-channel audio is supported for input to {self}""") _lowerCamelCase : int = is_batched_numpy or ( isinstance(lowerCamelCase__ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list))) ) if is_batched: _lowerCamelCase : Tuple = [np.asarray(lowerCamelCase__ , dtype=np.floataa) for speech in raw_speech] elif not is_batched and not isinstance(lowerCamelCase__ , np.ndarray): _lowerCamelCase : int = np.asarray(lowerCamelCase__ , dtype=np.floataa) elif isinstance(lowerCamelCase__ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa): _lowerCamelCase : str = raw_speech.astype(np.floataa) # always return batch if not is_batched: _lowerCamelCase : int = [np.asarray(lowerCamelCase__)] # convert to mel spectrogram, truncate and pad if needed. 
_lowerCamelCase : List[str] = [ self._get_input_mel(lowerCamelCase__ , max_length if max_length else self.nb_max_samples , lowerCamelCase__ , lowerCamelCase__) for waveform in raw_speech ] _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Tuple = [] for mel, longer in padded_inputs: input_mel.append(lowerCamelCase__) is_longer.append(lowerCamelCase__) if truncation == "fusion" and sum(lowerCamelCase__) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer _lowerCamelCase : List[Any] = np.random.randint(0 , len(lowerCamelCase__)) _lowerCamelCase : List[str] = True if isinstance(input_mel[0] , lowerCamelCase__): _lowerCamelCase : Dict = [np.asarray(lowerCamelCase__ , dtype=np.floataa) for feature in input_mel] # is_longer is a list of bool _lowerCamelCase : Any = [[longer] for longer in is_longer] _lowerCamelCase : str = {"input_features": input_mel, "is_longer": is_longer} _lowerCamelCase : Dict = BatchFeature(lowerCamelCase__) if return_tensors is not None: _lowerCamelCase : Optional[int] = input_features.convert_to_tensors(lowerCamelCase__) return input_features
717
import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase : Dict =logging.get_logger(__name__) lowerCAmelCase : Dict ={"vocab_file": "vocab.json"} lowerCAmelCase : List[str] ={ "vocab_file": { "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json", } } lowerCAmelCase : int ={"mgp-str": 27} class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = VOCAB_FILES_NAMES _snake_case = PRETRAINED_VOCAB_FILES_MAP _snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : int="[GO]" , _UpperCamelCase : Any="[GO]" , _UpperCamelCase : Optional[Any]="[s]" , _UpperCamelCase : List[str]="[GO]" , **_UpperCamelCase : Dict) ->Union[str, Any]: """simple docstring""" super().__init__( unk_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , pad_token=_UpperCamelCase , **_UpperCamelCase , ) with open(_UpperCamelCase , encoding="""utf-8""") as vocab_handle: _lowerCamelCase : Optional[Any] = json.load(_UpperCamelCase) _lowerCamelCase : Optional[Any] = {v: k for k, v in self.vocab.items()} @property def _SCREAMING_SNAKE_CASE ( self : str) ->Any: """simple docstring""" return len(self.vocab) def _SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]: """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Union[str, Any]) ->Any: """simple docstring""" _lowerCamelCase : Tuple = [] for s in text: char_tokens.extend(_UpperCamelCase) return char_tokens def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : int) ->Optional[int]: """simple docstring""" return self.vocab.get(_UpperCamelCase , self.vocab.get(self.unk_token)) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[Any]) ->Dict: """simple docstring""" return self.decoder.get(_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None) ->Tuple[str]: """simple docstring""" if not os.path.isdir(_UpperCamelCase): logger.error("""Vocabulary path ({}) should be a directory""".format(_UpperCamelCase)) return _lowerCamelCase : Tuple = os.path.join( _UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]) with open(_UpperCamelCase , """w""" , encoding="""utf-8""") as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=_UpperCamelCase , ensure_ascii=_UpperCamelCase) + """\n""") return (vocab_file,)
15
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
718
import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Tuple = ["""a""", """b""", """c"""] # Defaults to last layer if both are None _lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) self.assertEqual(_UpperCamelCase , ["""c"""]) self.assertEqual(_UpperCamelCase , [2]) # Out indices set to match out features _lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(["""a""", """c"""] , _UpperCamelCase , _UpperCamelCase) self.assertEqual(_UpperCamelCase , ["""a""", """c"""]) self.assertEqual(_UpperCamelCase , [0, 2]) # Out features set to match out indices _lowerCamelCase , _lowerCamelCase : Tuple = get_aligned_output_features_output_indices(_UpperCamelCase , [0, 2] , _UpperCamelCase) self.assertEqual(_UpperCamelCase , ["""a""", """c"""]) self.assertEqual(_UpperCamelCase , [0, 2]) # Out features selected from negative indices _lowerCamelCase , _lowerCamelCase : str = get_aligned_output_features_output_indices(_UpperCamelCase , [-3, -1] , _UpperCamelCase) self.assertEqual(_UpperCamelCase , ["""a""", """c"""]) self.assertEqual(_UpperCamelCase , [-3, -1]) def _SCREAMING_SNAKE_CASE ( self : int) ->int: """simple docstring""" with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , _UpperCamelCase) # Out features must be a list with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""]) # Out features must be a subset of stage names with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""]) # Out indices must be a list or tuple with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(_UpperCamelCase , 0 , ["""a""", """b"""]) # Out indices must be a subset of stage names with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(_UpperCamelCase , (0, 1) , ["""a"""]) # Out features and out indices must be the same length with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""]) # Out features should match out indices with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""]) # Out features and out indices should be in order with self.assertRaises(_UpperCamelCase): verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""]) # Check passes with valid inputs verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""]) def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]: """simple docstring""" _lowerCamelCase : int = BackboneMixin() _lowerCamelCase : Union[str, Any] = ["""a""", """b""", """c"""] _lowerCamelCase : Tuple = ["""a""", """c"""] _lowerCamelCase : List[Any] = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ["""a""", """c"""]) self.assertEqual(backbone.out_indices , [0, 2]) # Check out features and indices are updated correctly _lowerCamelCase : str = ["""a""", """b"""] self.assertEqual(backbone.out_features , ["""a""", """b"""]) 
self.assertEqual(backbone.out_indices , [0, 1]) _lowerCamelCase : Optional[int] = [-3, -1] self.assertEqual(backbone.out_features , ["""a""", """c"""]) self.assertEqual(backbone.out_indices , [-3, -1])
15
0
import json
import os

import torch

from diffusers import UNetaDModel


os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)


def A__ ( __A ):
    '''simple docstring'''
    if hor == 128:
        _lowerCamelCase : int = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
        _lowerCamelCase : Any = (32, 128, 256)
        _lowerCamelCase : Optional[int] = ("""UpResnetBlock1D""", """UpResnetBlock1D""")

    elif hor == 32:
        _lowerCamelCase : str = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
        _lowerCamelCase : Optional[int] = (32, 64, 128, 256)
        _lowerCamelCase : List[str] = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
    _lowerCamelCase : int = torch.load(F"""/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch""" )
    _lowerCamelCase : List[Any] = model.state_dict()
    _lowerCamelCase : Optional[Any] = {
        """down_block_types""": down_block_types,
        """block_out_channels""": block_out_channels,
        """up_block_types""": up_block_types,
        """layers_per_block""": 1,
        """use_timestep_embedding""": True,
        """out_block_type""": """OutConv1DBlock""",
        """norm_num_groups""": 8,
        """downsample_each_block""": False,
        """in_channels""": 14,
        """out_channels""": 14,
        """extra_in_channels""": 0,
        """time_embedding_type""": """positional""",
        """flip_sin_to_cos""": False,
        """freq_shift""": 1,
        """sample_size""": 65_536,
        """mid_block_type""": """MidResTemporalBlock1D""",
        """act_fn""": """mish""",
    }
    _lowerCamelCase : Dict = UNetaDModel(**config )
    print(F"""length of state dict: {len(state_dict.keys() )}""" )
    print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
    _lowerCamelCase : List[str] = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        _lowerCamelCase : Union[str, Any] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )

    torch.save(hf_value_function.state_dict() , F"""hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin""" )
    with open(F"""hub/hopper-medium-v2/unet/hor{hor}/config.json""" , """w""" ) as f:
        json.dump(config , f )


def A__ ( ):
    '''simple docstring'''
    _lowerCamelCase : str = {
        """in_channels""": 14,
        """down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
        """up_block_types""": (),
        """out_block_type""": """ValueFunction""",
        """mid_block_type""": """ValueFunctionMidBlock1D""",
        """block_out_channels""": (32, 64, 128, 256),
        """layers_per_block""": 1,
        """downsample_each_block""": True,
        """sample_size""": 65_536,
        """out_channels""": 14,
        """extra_in_channels""": 0,
        """time_embedding_type""": """positional""",
        """use_timestep_embedding""": True,
        """flip_sin_to_cos""": False,
        """freq_shift""": 1,
        """norm_num_groups""": 8,
        """act_fn""": """mish""",
    }
    _lowerCamelCase : Union[str, Any] = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
    _lowerCamelCase : str = model
    _lowerCamelCase : Any = UNetaDModel(**config )
    print(F"""length of state dict: {len(state_dict.keys() )}""" )
    print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
    _lowerCamelCase : Dict = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        _lowerCamelCase : List[Any] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )

    torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
    with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f:
        json.dump(config , f )


if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
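The conversion above remaps checkpoint keys purely by zipping the two state dicts in iteration order. A minimal sketch of that assumption; the module and attribute names here are illustrative, not the diffusers models.

import torch.nn as nn

# state_dict keys follow parameter registration order, so two modules with
# matching shapes can be zipped key-to-key even when the names differ.
class Src(nn.Module):
    def __init__(self):
        super().__init__()
        self.layer_a = nn.Linear(4, 8)
        self.layer_b = nn.Linear(8, 2)

class Dst(nn.Module):
    def __init__(self):
        super().__init__()
        self.first = nn.Linear(4, 8)
        self.second = nn.Linear(8, 2)

src_sd = Src().state_dict()
dst = Dst()
mapping = dict(zip(src_sd.keys(), dst.state_dict().keys()))
dst.load_state_dict({mapping[k]: v for k, v in src_sd.items()})  # shapes line up position-wise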
719
import math def A__ ( __A ): '''simple docstring''' assert isinstance(__A , __A ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or not number % 2: # Negatives, 0, 1 and all even numbers are not primes return False _lowerCamelCase : List[Any] = range(3 , int(math.sqrt(__A ) + 1 ) , 2 ) return not any(not number % i for i in odd_numbers ) def A__ ( __A , __A=1 , **__A ): '''simple docstring''' _lowerCamelCase : Dict = factor * value _lowerCamelCase : str = value while not is_prime(__A ): value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1 if value == first_value_val: return next_prime(value + 1 , **__A ) return value
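A de-obfuscated, self-contained check of the trial-division primality test implemented above (the readable names here mirror what the row computes, not the row's own identifiers).

import math

def is_prime(number: int) -> bool:
    # Trial division over odd candidates up to sqrt(number).
    assert isinstance(number, int) and number >= 0
    if 1 < number < 4:  # 2 and 3 are prime
        return True
    if number < 2 or number % 2 == 0:  # negatives, 0, 1 and evens are not
        return False
    return not any(number % i == 0 for i in range(3, int(math.sqrt(number)) + 1, 2))

assert [n for n in range(20) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19]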
15
0
'''simple docstring'''

from argparse import ArgumentParser

from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand


lowerCAmelCase : Optional[int] =logging.get_logger(__name__)  # pylint: disable=invalid-name


def A__ ( __A ):
    '''simple docstring'''
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext ):
            return ext
    raise Exception(
        F"""Unable to determine file format from file extension {path}. """
        F"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )


def A__ ( __A ):
    '''simple docstring'''
    _lowerCamelCase : Any = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    _lowerCamelCase : int = try_infer_format_from_ext(args.input ) if args.format == """infer""" else args.format
    _lowerCamelCase : List[str] = PipelineDataFormat.from_str(
        format=format , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
    return RunCommand(nlp , reader )


class __snake_case ( __lowerCAmelCase ):
    '''simple docstring'''

    def __init__( self : Any , _UpperCamelCase : Pipeline , _UpperCamelCase : PipelineDataFormat) ->str:
        """simple docstring"""
        _lowerCamelCase : Dict = nlp
        _lowerCamelCase : int = reader

    @staticmethod
    def _SCREAMING_SNAKE_CASE ( _UpperCamelCase : ArgumentParser) ->Optional[Any]:
        """simple docstring"""
        _lowerCamelCase : Tuple = parser.add_parser("""run""" , help="""Run a pipeline through the CLI""")
        run_parser.add_argument("""--task""" , choices=get_supported_tasks() , help="""Task to run""")
        run_parser.add_argument("""--input""" , type=str , help="""Path to the file to use for inference""")
        run_parser.add_argument("""--output""" , type=str , help="""Path to the file that will be used post to write results.""")
        run_parser.add_argument("""--model""" , type=str , help="""Name or path to the model to instantiate.""")
        run_parser.add_argument("""--config""" , type=str , help="""Name or path to the model\'s config to instantiate.""")
        run_parser.add_argument(
            """--tokenizer""" , type=str , help="""Name of the tokenizer to use. (default: same as the model name)""")
        run_parser.add_argument(
            """--column""" , type=str , help="""Name of the column to use as input. (For multi columns input as QA use column1,columns2)""" , )
        run_parser.add_argument(
            """--format""" , type=str , default="""infer""" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="""Input format to read from""" , )
        run_parser.add_argument(
            """--device""" , type=int , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
        run_parser.add_argument("""--overwrite""" , action="""store_true""" , help="""Allow overwriting the output file.""")
        run_parser.set_defaults(func=run_command_factory)

    def _SCREAMING_SNAKE_CASE ( self : str) ->Dict:
        """simple docstring"""
        _lowerCamelCase , _lowerCamelCase : Dict = self._nlp, []

        for entry in self._reader:
            _lowerCamelCase : int = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output , dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            _lowerCamelCase : Optional[int] = self._reader.save_binary(outputs)
            logger.warning(F"""Current pipeline requires output to be in binary format, saving at {binary_path}""")
        else:
            self._reader.save(outputs)
720
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING lowerCAmelCase : Optional[Any] =logging.get_logger(__name__) @add_end_docstrings(__lowerCAmelCase ) class __snake_case ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : str , *_UpperCamelCase : int , **_UpperCamelCase : List[str]) ->Tuple: """simple docstring""" super().__init__(*_UpperCamelCase , **_UpperCamelCase) requires_backends(self , """vision""") self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING) def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : List[str]=None) ->Optional[int]: """simple docstring""" _lowerCamelCase : Optional[int] = {} if top_k is not None: _lowerCamelCase : str = top_k return {}, {}, postprocess_params def __call__( self : Optional[int] , _UpperCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_UpperCamelCase : Optional[int]) ->Dict: """simple docstring""" return super().__call__(_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[int]) ->str: """simple docstring""" _lowerCamelCase : Tuple = load_image(_UpperCamelCase) _lowerCamelCase : Any = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework) return model_inputs def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : Union[str, Any]) ->List[str]: """simple docstring""" _lowerCamelCase : Any = self.model(**_UpperCamelCase) return model_outputs def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : List[str]=5) ->str: """simple docstring""" if top_k > self.model.config.num_labels: _lowerCamelCase : Union[str, Any] = self.model.config.num_labels if self.framework == "pt": _lowerCamelCase : Optional[Any] = model_outputs.logits.softmax(-1)[0] _lowerCamelCase , _lowerCamelCase : Dict = probs.topk(_UpperCamelCase) elif self.framework == "tf": _lowerCamelCase : List[Any] = stable_softmax(model_outputs.logits , axis=-1)[0] _lowerCamelCase : List[Any] = tf.math.top_k(_UpperCamelCase , k=_UpperCamelCase) _lowerCamelCase , _lowerCamelCase : str = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(F"""Unsupported framework: {self.framework}""") _lowerCamelCase : str = scores.tolist() _lowerCamelCase : str = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase)]
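A hedged usage sketch for the image-classification pipeline defined above; the checkpoint name and image path are placeholders, and the first call downloads the model weights.

from transformers import pipeline

# top_k maps onto the postprocess parameter handled in _sanitize_parameters above.
classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
for pred in classifier("cat.jpg", top_k=3):
    print(f"{pred['label']}: {pred['score']:.3f}")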
15
0
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : Dict =logging.get_logger(__name__) lowerCAmelCase : int ={ "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json", "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json", } class __snake_case ( __a ): '''simple docstring''' _snake_case = 'luke' def __init__( self : str , _UpperCamelCase : Tuple=5_0267 , _UpperCamelCase : Any=50_0000 , _UpperCamelCase : Union[str, Any]=768 , _UpperCamelCase : int=256 , _UpperCamelCase : Optional[Any]=12 , _UpperCamelCase : List[str]=12 , _UpperCamelCase : List[str]=3072 , _UpperCamelCase : Tuple="gelu" , _UpperCamelCase : int=0.1 , _UpperCamelCase : Dict=0.1 , _UpperCamelCase : int=512 , _UpperCamelCase : Dict=2 , _UpperCamelCase : Optional[int]=0.0_2 , _UpperCamelCase : Optional[int]=1E-1_2 , _UpperCamelCase : Tuple=True , _UpperCamelCase : List[str]=None , _UpperCamelCase : List[Any]=1 , _UpperCamelCase : Union[str, Any]=0 , _UpperCamelCase : Any=2 , **_UpperCamelCase : Optional[int] , ) ->List[Any]: """simple docstring""" super().__init__(pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ , **A__) _lowerCamelCase : Optional[Any] = vocab_size _lowerCamelCase : Optional[int] = entity_vocab_size _lowerCamelCase : str = hidden_size _lowerCamelCase : List[str] = entity_emb_size _lowerCamelCase : Optional[int] = num_hidden_layers _lowerCamelCase : Any = num_attention_heads _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : Tuple = intermediate_size _lowerCamelCase : Optional[Any] = hidden_dropout_prob _lowerCamelCase : Dict = attention_probs_dropout_prob _lowerCamelCase : Any = max_position_embeddings _lowerCamelCase : Any = type_vocab_size _lowerCamelCase : List[Any] = initializer_range _lowerCamelCase : Any = layer_norm_eps _lowerCamelCase : List[str] = use_entity_aware_attention _lowerCamelCase : int = classifier_dropout
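A short usage sketch: the entity-specific fields are what this config adds on top of a BERT-style configuration.

from transformers import LukeConfig

# Instantiate with defaults and override the entity-related hyperparameters.
config = LukeConfig(entity_vocab_size=500_000, entity_emb_size=256)
print(config.vocab_size, config.entity_vocab_size, config.use_entity_aware_attention)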
721
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class __snake_case ( unittest.TestCase ): '''simple docstring''' _snake_case = ViTImageProcessor if is_vision_available() else None @property def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = (3, 32, 128) _lowerCamelCase : str = tempfile.mkdtemp() # fmt: off _lowerCamelCase : Dict = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on _lowerCamelCase : str = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase)))) _lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp: fp.write(json.dumps(_UpperCamelCase) + """\n""") _lowerCamelCase : Any = { """do_normalize""": False, """do_resize""": True, """image_processor_type""": """ViTImageProcessor""", """resample""": 3, """size""": {"""height""": 32, """width""": 128}, } _lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , _UpperCamelCase) with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp: json.dump(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **_UpperCamelCase : Any) ->Tuple: """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Dict , **_UpperCamelCase : Optional[Any]) ->List[Any]: """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: """simple docstring""" shutil.rmtree(self.tmpdirname) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: """simple docstring""" _lowerCamelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta) _lowerCamelCase : int = Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1)) return image_input def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" _lowerCamelCase : List[str] = self.get_tokenizer() _lowerCamelCase : Tuple = self.get_image_processor() _lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) processor.save_pretrained(self.tmpdirname) _lowerCamelCase : int = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCamelCase) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab()) self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string()) 
self.assertIsInstance(processor.image_processor , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict: """simple docstring""" _lowerCamelCase : Dict = self.get_tokenizer() _lowerCamelCase : Optional[Any] = self.get_image_processor() _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) processor.save_pretrained(self.tmpdirname) _lowerCamelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""") _lowerCamelCase : Union[str, Any] = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0) _lowerCamelCase : Tuple = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCamelCase , padding_value=1.0) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any) ->int: """simple docstring""" _lowerCamelCase : int = self.get_image_processor() _lowerCamelCase : int = self.get_tokenizer() _lowerCamelCase : List[str] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : List[str] = self.prepare_image_inputs() _lowerCamelCase : Optional[int] = image_processor(_UpperCamelCase , return_tensors="""np""") _lowerCamelCase : int = processor(images=_UpperCamelCase , return_tensors="""np""") for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]: """simple docstring""" _lowerCamelCase : List[Any] = self.get_image_processor() _lowerCamelCase : int = self.get_tokenizer() _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : Optional[int] = """test""" _lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase) _lowerCamelCase : Dict = tokenizer(_UpperCamelCase) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.get_image_processor() _lowerCamelCase : List[Any] = self.get_tokenizer() _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : Any = """test""" _lowerCamelCase : List[str] = self.prepare_image_inputs() _lowerCamelCase : int = processor(text=_UpperCamelCase , images=_UpperCamelCase) self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""]) # test if it raises when no input is passed with pytest.raises(_UpperCamelCase): processor() def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.get_image_processor() _lowerCamelCase : List[str] = self.get_tokenizer() _lowerCamelCase : Dict = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] _lowerCamelCase : Any = processor.char_decode(_UpperCamelCase) _lowerCamelCase : Tuple = tokenizer.batch_decode(_UpperCamelCase) _lowerCamelCase : List[str] = [seq.replace(""" """ , """""") for seq in decoded_tok] 
self.assertListEqual(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str: """simple docstring""" _lowerCamelCase : Dict = self.get_image_processor() _lowerCamelCase : str = self.get_tokenizer() _lowerCamelCase : List[Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : int = None _lowerCamelCase : Union[str, Any] = self.prepare_image_inputs() _lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase , images=_UpperCamelCase) self.assertListEqual(list(inputs.keys()) , processor.model_input_names) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : List[str] = self.get_image_processor() _lowerCamelCase : int = self.get_tokenizer() _lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : Any = torch.randn(1 , 27 , 38) _lowerCamelCase : List[Any] = torch.randn(1 , 27 , 5_0257) _lowerCamelCase : List[str] = torch.randn(1 , 27 , 3_0522) _lowerCamelCase : int = processor.batch_decode([char_input, bpe_input, wp_input]) self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
15
0
import os

# Precomputes a list of the 100 first triangular numbers
lowerCAmelCase : List[Any] =[int(0.5 * n * (n + 1)) for n in range(1, 101)]


def A__ ( ):
    '''simple docstring'''
    _lowerCamelCase : List[Any] = os.path.dirname(os.path.realpath(__file__ ) )
    _lowerCamelCase : Optional[Any] = os.path.join(script_directory , """words.txt""" )

    _lowerCamelCase : Optional[int] = """"""
    with open(words_file_path ) as f:
        _lowerCamelCase : Any = f.readline()

    _lowerCamelCase : int = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )]
    _lowerCamelCase : List[Any] = [
        word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words )


if __name__ == "__main__":
    print(solution())
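A worked check of the word-value test above: "SKY" scores 19 + 11 + 25 = 55, which is the 10th triangular number.

# ord("A") - 64 == 1, so each letter maps to its alphabet position.
word_value = sum(ord(ch) - 64 for ch in "SKY")
assert word_value == 55 == 10 * 11 // 2  # t_10 = n(n + 1)/2 with n = 10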
700
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def A__ ( ):
    '''simple docstring'''
    _lowerCamelCase : Optional[int] = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
        ) )

    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""" , type=int , default=1 , help="""Number of TPU cores to use (1 or 8).""" )

    # positional
    parser.add_argument(
        """training_script""" , type=str , help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ) , )

    # rest from the training program
    parser.add_argument("""training_script_args""" , nargs=REMAINDER )
    return parser.parse_args()


def A__ ( ):
    '''simple docstring'''
    _lowerCamelCase : List[str] = parse_args()

    # Import training_script as a module.
    _lowerCamelCase : List[Any] = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    _lowerCamelCase : Optional[Any] = script_fpath.stem
    _lowerCamelCase : Dict = importlib.import_module(mod_name )

    # Patch sys.argv
    _lowerCamelCase : Union[str, Any] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]

    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )


if __name__ == "__main__":
    main()
15
0
import os import zipfile import pytest from datasets.utils.extract import ( BzipaExtractor, Extractor, GzipExtractor, LzaExtractor, SevenZipExtractor, TarExtractor, XzExtractor, ZipExtractor, ZstdExtractor, ) from .utils import require_lza, require_pyazr, require_zstandard @pytest.mark.parametrize( """compression_format, is_archive""" , [ ("""7z""", True), ("""bz2""", False), ("""gzip""", False), ("""lz4""", False), ("""tar""", True), ("""xz""", False), ("""zip""", True), ("""zstd""", False), ] , ) def A__ ( __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , ): '''simple docstring''' _lowerCamelCase : str = { """7z""": (seven_zip_file, SevenZipExtractor), """bz2""": (bza_file, BzipaExtractor), """gzip""": (gz_file, GzipExtractor), """lz4""": (lza_file, LzaExtractor), """tar""": (tar_file, TarExtractor), """xz""": (xz_file, XzExtractor), """zip""": (zip_file, ZipExtractor), """zstd""": (zstd_file, ZstdExtractor), } _lowerCamelCase , _lowerCamelCase : Optional[Any] = input_paths_and_base_extractors[compression_format] if input_path is None: _lowerCamelCase : Dict = F"""for '{compression_format}' compression_format, """ if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(lowercase__ ) assert base_extractor.is_extractable(lowercase__ ) _lowerCamelCase : Any = tmp_path / ("""extracted""" if is_archive else """extracted.txt""") base_extractor.extract(lowercase__ , lowercase__ ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name _lowerCamelCase : Union[str, Any] = file_path.read_text(encoding="""utf-8""" ) else: _lowerCamelCase : List[Any] = output_path.read_text(encoding="""utf-8""" ) _lowerCamelCase : Optional[int] = text_file.read_text(encoding="""utf-8""" ) assert extracted_file_content == expected_file_content @pytest.mark.parametrize( """compression_format, is_archive""" , [ ("""7z""", True), ("""bz2""", False), ("""gzip""", False), ("""lz4""", False), ("""tar""", True), ("""xz""", False), ("""zip""", True), ("""zstd""", False), ] , ) def A__ ( __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , ): '''simple docstring''' _lowerCamelCase : Optional[Any] = { """7z""": seven_zip_file, """bz2""": bza_file, """gzip""": gz_file, """lz4""": lza_file, """tar""": tar_file, """xz""": xz_file, """zip""": zip_file, """zstd""": zstd_file, } _lowerCamelCase : Union[str, Any] = input_paths[compression_format] if input_path is None: _lowerCamelCase : Any = F"""for '{compression_format}' compression_format, """ if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(lowercase__ ) _lowerCamelCase : Optional[int] = Extractor.infer_extractor_format(lowercase__ ) assert extractor_format is not None _lowerCamelCase : int = tmp_path / ("""extracted""" if is_archive else """extracted.txt""") Extractor.extract(lowercase__ , lowercase__ , lowercase__ ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name _lowerCamelCase : Dict = file_path.read_text(encoding="""utf-8""" ) else: _lowerCamelCase : Any = output_path.read_text(encoding="""utf-8""" ) _lowerCamelCase : Optional[Any] = 
text_file.read_text(encoding="""utf-8""" ) assert extracted_file_content == expected_file_content @pytest.fixture def A__ ( __A , __A ): '''simple docstring''' import tarfile _lowerCamelCase : Any = tmp_path / """data_dot_dot""" directory.mkdir() _lowerCamelCase : Optional[int] = directory / """tar_file_with_dot_dot.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: f.add(lowercase__ , arcname=os.path.join("""..""" , text_file.name ) ) return path @pytest.fixture def A__ ( __A ): '''simple docstring''' import tarfile _lowerCamelCase : Optional[Any] = tmp_path / """data_sym_link""" directory.mkdir() _lowerCamelCase : Dict = directory / """tar_file_with_sym_link.tar""" os.symlink("""..""" , directory / """subdir""" , target_is_directory=lowercase__ ) with tarfile.TarFile(lowercase__ , """w""" ) as f: f.add(str(directory / """subdir""" ) , arcname="""subdir""" ) # str required by os.readlink on Windows and Python < 3.8 return path @pytest.mark.parametrize( """insecure_tar_file, error_log""" , [("""tar_file_with_dot_dot""", """illegal path"""), ("""tar_file_with_sym_link""", """Symlink""")] , ) def A__ ( __A , __A , __A , __A , __A , __A ): '''simple docstring''' _lowerCamelCase : str = { """tar_file_with_dot_dot""": tar_file_with_dot_dot, """tar_file_with_sym_link""": tar_file_with_sym_link, } _lowerCamelCase : Optional[int] = insecure_tar_files[insecure_tar_file] _lowerCamelCase : Union[str, Any] = tmp_path / """extracted""" TarExtractor.extract(lowercase__ , lowercase__ ) assert caplog.text for record in caplog.records: assert record.levelname == "ERROR" assert error_log in record.msg def A__ ( __A ): '''simple docstring''' _lowerCamelCase : int = tmpdir / """not_a_zip_file""" # From: https://github.com/python/cpython/pull/5053 _lowerCamelCase : List[str] = ( B"""\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00""" B"""\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I""" B"""DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07""" B"""\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82""" ) with not_a_zip_file.open("""wb""" ) as f: f.write(lowercase__ ) assert zipfile.is_zipfile(str(lowercase__ ) ) # is a false positive for `zipfile` assert not ZipExtractor.is_extractable(lowercase__ ) # but we're right
701
def A__ ( __A , __A ): '''simple docstring''' _enforce_args(__A , __A ) if n == 0: return 0 _lowerCamelCase : Tuple = float("""-inf""" ) for i in range(1 , n + 1 ): _lowerCamelCase : Any = max( __A , prices[i - 1] + naive_cut_rod_recursive(n - i , __A ) ) return max_revue def A__ ( __A , __A ): '''simple docstring''' _enforce_args(__A , __A ) _lowerCamelCase : Optional[Any] = [float("""-inf""" ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(__A , __A , __A ) def A__ ( __A , __A , __A ): '''simple docstring''' if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: _lowerCamelCase : int = float("""-inf""" ) for i in range(1 , n + 1 ): _lowerCamelCase : Optional[Any] = max( __A , prices[i - 1] + _top_down_cut_rod_recursive(n - i , __A , __A ) , ) _lowerCamelCase : Optional[Any] = max_revenue return max_rev[n] def A__ ( __A , __A ): '''simple docstring''' _enforce_args(__A , __A ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. _lowerCamelCase : List[Any] = [float("""-inf""" ) for _ in range(n + 1 )] _lowerCamelCase : Any = 0 for i in range(1 , n + 1 ): _lowerCamelCase : Any = max_rev[i] for j in range(1 , i + 1 ): _lowerCamelCase : List[Any] = max(__A , prices[j - 1] + max_rev[i - j] ) _lowerCamelCase : int = max_revenue_i return max_rev[n] def A__ ( __A , __A ): '''simple docstring''' if n < 0: _lowerCamelCase : Any = F"""n must be greater than or equal to 0. Got n = {n}""" raise ValueError(__A ) if n > len(__A ): _lowerCamelCase : List[Any] = ( """Each integral piece of rod must have a corresponding price. """ F"""Got n = {n} but length of prices = {len(__A )}""" ) raise ValueError(__A ) def A__ ( ): '''simple docstring''' _lowerCamelCase : str = [6, 10, 12, 15, 20, 23] _lowerCamelCase : List[str] = len(__A ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. _lowerCamelCase : Tuple = 36 _lowerCamelCase : Any = top_down_cut_rod(__A , __A ) _lowerCamelCase : Dict = bottom_up_cut_rod(__A , __A ) _lowerCamelCase : List[str] = naive_cut_rod_recursive(__A , __A ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
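A self-contained check of the bottom-up recurrence above: with prices [1, 5, 8, 9], a rod of length 4 is best cut into 2 + 2 for revenue 10 (names here are readable stand-ins for the row's obfuscated locals).

def bottom_up_check(prices, n):
    # max_rev[i] holds the best revenue obtainable from a rod of length i.
    max_rev = [0] + [float("-inf")] * n
    for i in range(1, n + 1):
        max_rev[i] = max(prices[j - 1] + max_rev[i - j] for j in range(1, i + 1))
    return max_rev[n]

assert bottom_up_check([1, 5, 8, 9], 4) == 10  # 5 + 5 from two length-2 pieces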
15
0
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def A__ ( __A ): '''simple docstring''' if ( (cp >= 0X4e00 and cp <= 0X9fff) or (cp >= 0X3400 and cp <= 0X4dbf) # or (cp >= 0X20000 and cp <= 0X2a6df) # or (cp >= 0X2a700 and cp <= 0X2b73f) # or (cp >= 0X2b740 and cp <= 0X2b81f) # or (cp >= 0X2b820 and cp <= 0X2ceaf) # or (cp >= 0Xf900 and cp <= 0Xfaff) or (cp >= 0X2f800 and cp <= 0X2fa1f) # ): # return True return False def A__ ( __A ): '''simple docstring''' for char in word: _lowerCamelCase : Tuple = ord(lowerCAmelCase_ ) if not _is_chinese_char(lowerCAmelCase_ ): return 0 return 1 def A__ ( __A ): '''simple docstring''' _lowerCamelCase : Tuple = set() for token in tokens: _lowerCamelCase : Tuple = len(lowerCAmelCase_ ) > 1 and is_chinese(lowerCAmelCase_ ) if chinese_word: word_set.add(lowerCAmelCase_ ) _lowerCamelCase : Any = list(lowerCAmelCase_ ) return word_list def A__ ( __A , __A ): '''simple docstring''' if not chinese_word_set: return bert_tokens _lowerCamelCase : str = max([len(lowerCAmelCase_ ) for w in chinese_word_set] ) _lowerCamelCase : Tuple = bert_tokens _lowerCamelCase : Dict = 0, len(lowerCAmelCase_ ) while start < end: _lowerCamelCase : Any = True if is_chinese(bert_word[start] ): _lowerCamelCase : int = min(end - start , lowerCAmelCase_ ) for i in range(lowerCAmelCase_ , 1 , -1 ): _lowerCamelCase : int = ''''''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): _lowerCamelCase : Optional[int] = '''##''' + bert_word[j] _lowerCamelCase : Dict = start + i _lowerCamelCase : Dict = False break if single_word: start += 1 return bert_word def A__ ( __A , __A , __A ): '''simple docstring''' _lowerCamelCase : List[Any] = [] for i in range(0 , len(lowerCAmelCase_ ) , 100 ): _lowerCamelCase : Any = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""] ).cws _lowerCamelCase : List[Any] = [get_chinese_word(lowerCAmelCase_ ) for r in res] ltp_res.extend(lowerCAmelCase_ ) assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) _lowerCamelCase : Dict = [] for i in range(0 , len(lowerCAmelCase_ ) , 100 ): _lowerCamelCase : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=512 ) bert_res.extend(res["""input_ids"""] ) assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) _lowerCamelCase : Optional[Any] = [] for input_ids, chinese_word in zip(lowerCAmelCase_ , lowerCAmelCase_ ): _lowerCamelCase : Dict = [] for id in input_ids: _lowerCamelCase : Optional[int] = bert_tokenizer._convert_id_to_token(lowerCAmelCase_ ) input_tokens.append(lowerCAmelCase_ ) _lowerCamelCase : Dict = add_sub_symbol(lowerCAmelCase_ , lowerCAmelCase_ ) _lowerCamelCase : Tuple = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(lowerCAmelCase_ ): if token[:2] == "##": _lowerCamelCase : Tuple = token[2:] # save chinese tokens' pos if len(lowerCAmelCase_ ) == 1 and _is_chinese_char(ord(lowerCAmelCase_ ) ): ref_id.append(lowerCAmelCase_ ) ref_ids.append(lowerCAmelCase_ ) assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) return ref_ids def A__ ( __A ): '''simple docstring''' with open(args.file_name , """r""" , encoding="""utf-8""" ) as f: _lowerCamelCase : int = f.readlines() _lowerCamelCase : int = [line.strip() for line in data if len(lowerCAmelCase_ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' _lowerCamelCase : Union[str, Any] = LTP(args.ltp ) # faster in GPU device _lowerCamelCase : Any = BertTokenizer.from_pretrained(args.bert ) _lowerCamelCase : Tuple = prepare_ref(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) with open(args.save_path , """w""" , encoding="""utf-8""" ) as f: _lowerCamelCase : Dict = [json.dumps(lowerCAmelCase_ ) + '''\n''' for ref in ref_ids] f.writelines(lowerCAmelCase_ ) if __name__ == "__main__": lowerCAmelCase : Union[str, Any] =argparse.ArgumentParser(description="prepare_chinese_ref") parser.add_argument( "--file_name", required=False, type=str, default="./resources/chinese-demo.txt", help="file need process, same as training data in lm", ) parser.add_argument( "--ltp", required=False, type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path", ) parser.add_argument( "--bert", required=False, type=str, default="./resources/robert", help="resources for Bert tokenizer", ) parser.add_argument( "--save_path", required=False, type=str, default="./resources/ref.txt", help="path to save res", ) lowerCAmelCase : Union[str, Any] =parser.parse_args() main(args)
702
from __future__ import annotations class __snake_case : '''simple docstring''' def __init__( self : Tuple , _UpperCamelCase : int = 0) ->str: """simple docstring""" _lowerCamelCase : Union[str, Any] = key def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]: """simple docstring""" assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Union[str, Any] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(_UpperCamelCase) ^ key) for ch in content] def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]: """simple docstring""" assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Optional[int] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(_UpperCamelCase) ^ key) for ch in content] def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str: """simple docstring""" assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : int = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned _lowerCamelCase : Any = """""" for ch in content: ans += chr(ord(_UpperCamelCase) ^ key) return ans def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str: """simple docstring""" assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : int = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned _lowerCamelCase : Optional[Any] = """""" for ch in content: ans += chr(ord(_UpperCamelCase) ^ key) return ans def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->bool: """simple docstring""" assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase) try: with open(_UpperCamelCase) as fin, open("""encrypt.out""" , """w+""") as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(_UpperCamelCase , _UpperCamelCase)) except OSError: return False return True def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : int) ->bool: """simple docstring""" assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase) try: with open(_UpperCamelCase) as fin, open("""decrypt.out""" , """w+""") as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(_UpperCamelCase , _UpperCamelCase)) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
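A round-trip sanity check of the XOR scheme above, matching the commented usage at the bottom of the row: XOR-ing with the same single-byte key twice restores the plaintext.

# XOR is its own inverse: (c ^ k) ^ k == c.
key = 67
plain = "hallo welt"
cipher = "".join(chr(ord(ch) ^ key) for ch in plain)
assert "".join(chr(ord(ch) ^ key) for ch in cipher) == plain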
15
0
import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase : Dict =logging.get_logger(__name__) lowerCAmelCase : List[str] ={"vocab_file": "vocab.txt", "emoji_file": "emoji.json"} lowerCAmelCase : Optional[Any] ={ "vocab_file": { "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt", }, "emoji_file": { "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json", }, } lowerCAmelCase : str ={ "abeja/gpt-neox-japanese-2.7b": 2048, } def A__ ( __A , __A ): '''simple docstring''' with open(SCREAMING_SNAKE_CASE__ , """r""" , encoding="""utf-8""" ) as f: _lowerCamelCase : int = json.loads(f.read() ) _lowerCamelCase : int = collections.OrderedDict() _lowerCamelCase : Optional[int] = collections.OrderedDict() _lowerCamelCase : int = collections.OrderedDict() with open(SCREAMING_SNAKE_CASE__ , """r""" , encoding="""utf-8""" ) as f: _lowerCamelCase : Optional[Any] = f.readlines() _lowerCamelCase : Any = [[t.rstrip("""\n""" )] if (t == """,""" or """,""" not in t) else t.rstrip("""\n""" ).split(""",""" ) for t in token] for idx, b in enumerate(SCREAMING_SNAKE_CASE__ ): _lowerCamelCase : Dict = b _lowerCamelCase : List[Any] = idx for wd in b: _lowerCamelCase : Optional[Any] = idx return vocab, raw_vocab, ids_to_tokens, emoji class __snake_case ( _UpperCAmelCase ): '''simple docstring''' _snake_case = VOCAB_FILES_NAMES _snake_case = PRETRAINED_VOCAB_FILES_MAP _snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case = ["""input_ids""", """attention_mask"""] def __init__( self : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str]="<|endoftext|>" , _UpperCamelCase : Optional[int]="<|endoftext|>" , _UpperCamelCase : List[str]="<|startoftext|>" , _UpperCamelCase : Optional[int]="<|endoftext|>" , _UpperCamelCase : Tuple=False , **_UpperCamelCase : Dict , ) ->Any: """simple docstring""" super().__init__( unk_token=lowercase__ , pad_token=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , do_clean_text=lowercase__ , **lowercase__ , ) if not os.path.isfile(lowercase__): raise ValueError( F"""Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained""" """ model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""") if not os.path.isfile(lowercase__): raise ValueError( F"""Can\'t find a emoji file at path \'{emoji_file}\'. 
To load the emoji information from a Google""" """ pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""") _lowerCamelCase : Optional[int] = do_clean_text _lowerCamelCase : Union[str, Any] = load_vocab_and_emoji(lowercase__ , lowercase__) _lowerCamelCase : Tuple = SubWordJapaneseTokenizer( vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji) @property def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Dict: """simple docstring""" return len(self.raw_vocab) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[Any]: """simple docstring""" return dict(self.raw_vocab , **self.added_tokens_encoder) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Any) ->Union[str, Any]: """simple docstring""" return self.subword_tokenizer.tokenize(lowercase__ , clean=self.do_clean_text) def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : Union[str, Any]) ->Optional[int]: """simple docstring""" return self.vocab.get(lowercase__ , self.vocab.get(self.unk_token)) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[str]) ->Optional[int]: """simple docstring""" return self.subword_tokenizer.convert_id_to_token(lowercase__) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[Any]) ->int: """simple docstring""" _lowerCamelCase : Union[str, Any] = """""".join(lowercase__).strip() return out_string def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : Optional[Any]) ->List[Any]: """simple docstring""" _lowerCamelCase : Tuple = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowercase__ , add_special_tokens=lowercase__) + [self.eos_token_id]) if len(lowercase__) > self.model_max_length: _lowerCamelCase : Dict = input_ids[-self.model_max_length :] return input_ids def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : Any , _UpperCamelCase : Dict = None) ->Optional[Any]: """simple docstring""" _lowerCamelCase : int = 0 if os.path.isdir(lowercase__): _lowerCamelCase : Optional[int] = os.path.join( lowercase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]) _lowerCamelCase : Tuple = os.path.join( lowercase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""emoji_file"""]) else: _lowerCamelCase : Optional[Any] = ( (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""vocab_file"""] ) _lowerCamelCase : Union[str, Any] = ( (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""emoji_file"""] ) with open(lowercase__ , """w""" , encoding="""utf-8""") as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" """ Please check that the vocabulary is not corrupted!""") _lowerCamelCase : int = token_index writer.write(""",""".join(lowercase__) + """\n""") index += 1 with open(lowercase__ , """w""" , encoding="""utf-8""") as writer: json.dump(self.emoji , lowercase__) return vocab_file, emoji_file class __snake_case ( _UpperCAmelCase ): '''simple docstring''' def __init__( self : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : int , _UpperCamelCase : int) ->str: """simple docstring""" _lowerCamelCase : int = vocab # same as swe _lowerCamelCase : Tuple = ids_to_tokens # same as bpe _lowerCamelCase : Any = emoji _lowerCamelCase : Any 
= np.max([len(lowercase__) for w in self.vocab.keys()]) _lowerCamelCase : Optional[int] = re.compile(R"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""") _lowerCamelCase : Dict = re.compile(R"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""") _lowerCamelCase : Optional[Any] = re.compile(R"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""") _lowerCamelCase : Union[str, Any] = re.compile( R"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""") _lowerCamelCase : List[Any] = re.compile( R"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""") _lowerCamelCase : str = re.compile( R"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""") _lowerCamelCase : int = """─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿""" _lowerCamelCase : str = """▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟""" _lowerCamelCase : Any = str.maketrans({k: """<BLOCK>""" for k in keisen + blocks}) def __len__( self : Any) ->Optional[Any]: """simple docstring""" return len(self.ids_to_tokens) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : List[str]) ->Dict: """simple docstring""" _lowerCamelCase : Any = self.content_repattera.sub("""<URL>""" , lowercase__) _lowerCamelCase : Tuple = self.content_repattera.sub("""<EMAIL>""" , lowercase__) _lowerCamelCase : Union[str, Any] = self.content_repattera.sub("""<TEL>""" , lowercase__) _lowerCamelCase : Dict = self.content_repattera.sub("""<DATE>""" , lowercase__) _lowerCamelCase : Tuple = self.content_repattera.sub("""<DATE>""" , lowercase__) _lowerCamelCase : Tuple = self.content_repattera.sub("""<PRICE>""" , lowercase__) _lowerCamelCase : Optional[Any] = content.translate(self.content_transa) while "<BLOCK><BLOCK>" in content: _lowerCamelCase : Tuple = content.replace("""<BLOCK><BLOCK>""" , """<BLOCK>""") return content def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Any=False) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : Optional[int] = text.replace(""" """ , """<SP>""") _lowerCamelCase : Optional[int] = text.replace(""" """ , """<SP>""") _lowerCamelCase : Any = text.replace("""\r\n""" , """<BR>""") _lowerCamelCase : Dict = text.replace("""\n""" , """<BR>""") _lowerCamelCase : Optional[Any] = text.replace("""\r""" , """<BR>""") _lowerCamelCase : Optional[Any] = text.replace("""\t""" , """<TAB>""") _lowerCamelCase : List[str] = text.replace("""—""" , """ー""") _lowerCamelCase : Union[str, Any] = text.replace("""−""" , """ー""") for k, v in self.emoji["emoji"].items(): if k in text: _lowerCamelCase : Union[str, Any] = text.replace(lowercase__ , lowercase__) if clean: _lowerCamelCase : Optional[Any] = self.clean_text(lowercase__) def check_simbol(_UpperCamelCase : Any): _lowerCamelCase : int = x.encode() if len(lowercase__) == 1 and len(lowercase__) == 2: _lowerCamelCase : Tuple = (int(e[0]) << 8) + int(e[1]) if ( (c >= 0xc2a1 and c <= 0xc2bf) or (c >= 0xc780 and c <= 0xc783) or (c >= 0xcab9 and c <= 0xcbbf) or (c >= 0xcc80 and c <= 0xcda2) ): return True return False def checkuae(_UpperCamelCase : 
List[str]): _lowerCamelCase : int = x.encode() if len(lowercase__) == 1 and len(lowercase__) == 3: _lowerCamelCase : Union[str, Any] = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2]) if c >= 0xe2_8080 and c <= 0xe2_b07f: return True return False _lowerCamelCase : Optional[Any] = 0 _lowerCamelCase : List[str] = [] while pos < len(lowercase__): _lowerCamelCase : Tuple = min(len(lowercase__) , pos + self.maxlen + 1) if text[pos] == """<""" else pos + 3 _lowerCamelCase : Optional[Any] = [] # (token_id, token, pos) for e in range(lowercase__ , lowercase__ , -1): _lowerCamelCase : str = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(lowercase__) > 2: _lowerCamelCase : Optional[Any] = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e)) if len(lowercase__) > 0: # the smallest token_id is adopted _lowerCamelCase : Optional[int] = sorted(lowercase__ , key=lambda _UpperCamelCase: x[0])[0] result.append(lowercase__) _lowerCamelCase : Union[str, Any] = e else: _lowerCamelCase : Optional[Any] = pos + 1 _lowerCamelCase : Tuple = text[pos:end] if check_simbol(lowercase__): result.append("""<KIGOU>""") elif checkuae(lowercase__): result.append("""<U2000U2BFF>""") else: for i in wd.encode("""utf-8"""): result.append("""<|byte%d|>""" % i) _lowerCamelCase : Tuple = end return result def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : Any="\n") ->Dict: """simple docstring""" _lowerCamelCase : Optional[Any] = [] _lowerCamelCase : Optional[Any] = [] _lowerCamelCase : Dict = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2])) else: if len(lowercase__) > 0: words.append(bytearray(lowercase__).decode("""utf-8""" , errors="""replace""")) _lowerCamelCase : Tuple = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji["""emoji_inv"""][word]) elif word == "<SP>": words.append(""" """) elif word == "<BR>": words.append(lowercase__) elif word == "<TAB>": words.append("""\t""") elif word == "<BLOCK>": words.append("""▀""") elif word == "<KIGOU>": words.append("""ǀ""") elif word == "<U2000U2BFF>": words.append("""‖""") else: words.append(lowercase__) if len(lowercase__) > 0: words.append(bytearray(lowercase__).decode("""utf-8""" , errors="""replace""")) _lowerCamelCase : str = """""".join(lowercase__) return text
703
from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class __snake_case ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Dict , _UpperCamelCase : NestedDataStructureLike[PathLike] , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Optional[Features] = None , _UpperCamelCase : str = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : Optional[int] = None , **_UpperCamelCase : Tuple , ) ->Union[str, Any]: """simple docstring""" super().__init__( _UpperCamelCase , split=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase , streaming=_UpperCamelCase , num_proc=_UpperCamelCase , **_UpperCamelCase , ) _lowerCamelCase : List[Any] = path_or_paths if isinstance(_UpperCamelCase , _UpperCamelCase) else {self.split: path_or_paths} _lowerCamelCase : Any = Text( cache_dir=_UpperCamelCase , data_files=_UpperCamelCase , features=_UpperCamelCase , **_UpperCamelCase , ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]: """simple docstring""" if self.streaming: _lowerCamelCase : Tuple = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: _lowerCamelCase : List[Any] = None _lowerCamelCase : Any = None _lowerCamelCase : List[str] = None _lowerCamelCase : Dict = None self.builder.download_and_prepare( download_config=_UpperCamelCase , download_mode=_UpperCamelCase , verification_mode=_UpperCamelCase , base_path=_UpperCamelCase , num_proc=self.num_proc , ) _lowerCamelCase : Optional[int] = self.builder.as_dataset( split=self.split , verification_mode=_UpperCamelCase , in_memory=self.keep_in_memory) return dataset
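The reader above is what backs `load_dataset("text", ...)`; a usage sketch with a placeholder file path.

from datasets import load_dataset

# Each line of the file becomes one example with a single "text" column.
ds = load_dataset("text", data_files={"train": "corpus.txt"}, split="train")
print(ds[0]["text"])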
15
0
from __future__ import annotations lowerCAmelCase : List[str] = { "A": ["B", "C", "E"], "B": ["A", "D", "E"], "C": ["A", "F", "G"], "D": ["B"], "E": ["A", "B", "D"], "F": ["C"], "G": ["C"], } class __snake_case : '''simple docstring''' def __init__( self : Optional[Any] , _UpperCamelCase : dict[str, list[str]] , _UpperCamelCase : str) ->Tuple: """simple docstring""" _lowerCamelCase : str = graph # mapping node to its parent in resulting breadth first tree _lowerCamelCase : dict[str, str | None] = {} _lowerCamelCase : List[Any] = source_vertex def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple: """simple docstring""" _lowerCamelCase : Dict = {self.source_vertex} _lowerCamelCase : Dict = None _lowerCamelCase : Tuple = [self.source_vertex] # first in first out queue while queue: _lowerCamelCase : List[str] = queue.pop(0) for adjacent_vertex in self.graph[vertex]: if adjacent_vertex not in visited: visited.add(__snake_case) _lowerCamelCase : int = vertex queue.append(__snake_case) def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : str) ->Tuple: """simple docstring""" if target_vertex == self.source_vertex: return self.source_vertex _lowerCamelCase : str = self.parent.get(__snake_case) if target_vertex_parent is None: _lowerCamelCase : List[Any] = ( F"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}""" ) raise ValueError(__snake_case) return self.shortest_path(__snake_case) + F"""->{target_vertex}""" if __name__ == "__main__": lowerCAmelCase : Union[str, Any] = Graph(graph, "G") g.breath_first_search() print(g.shortest_path("D")) print(g.shortest_path("G")) print(g.shortest_path("Foo"))
704
lowerCAmelCase : Tuple =0 # The first color of the flag. lowerCAmelCase : Union[str, Any] =1 # The second color of the flag. lowerCAmelCase : Any =2 # The third color of the flag. lowerCAmelCase : List[str] =(red, white, blue) def A__ ( __A ): '''simple docstring''' if not sequence: return [] if len(__A ) == 1: return list(__A ) _lowerCamelCase : int = 0 _lowerCamelCase : Dict = len(__A ) - 1 _lowerCamelCase : str = 0 while mid <= high: if sequence[mid] == colors[0]: _lowerCamelCase , _lowerCamelCase : Tuple = sequence[mid], sequence[low] low += 1 mid += 1 elif sequence[mid] == colors[1]: mid += 1 elif sequence[mid] == colors[2]: _lowerCamelCase , _lowerCamelCase : str = sequence[high], sequence[mid] high -= 1 else: _lowerCamelCase : int = F"""The elements inside the sequence must contains only {colors} values""" raise ValueError(__A ) return sequence if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase : List[str] =input("Enter numbers separated by commas:\n").strip() lowerCAmelCase : Dict =[int(item.strip()) for item in user_input.split(",")] print(F"""{dutch_national_flag_sort(unsorted)}""")
15
0
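# A minimal standalone restatement of the three-way (Dutch national flag)
# partition used above, with readable names; assumes the input holds only the
# values 0, 1 and 2. Illustrative only -- not the obfuscated function itself.
def three_way_partition(seq: list[int]) -> list[int]:
    low, mid, high = 0, 0, len(seq) - 1
    while mid <= high:
        if seq[mid] == 0:    # swap into the growing block of 0s
            seq[low], seq[mid] = seq[mid], seq[low]
            low += 1
            mid += 1
        elif seq[mid] == 1:  # already in the middle block
            mid += 1
        else:                # seq[mid] == 2: swap into the block of 2s at the end
            seq[mid], seq[high] = seq[high], seq[mid]
            high -= 1
    return seq

assert three_way_partition([2, 0, 1, 1, 0, 2]) == [0, 0, 1, 1, 2, 2]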
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase : Any =logging.get_logger(__name__) lowerCAmelCase : str ="""▁""" lowerCAmelCase : Dict ={"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""} lowerCAmelCase : Dict ={ """vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""", }, """monolingual_vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""", }, } lowerCAmelCase : Optional[int] ={"""vinai/bartpho-syllable""": 1024} class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = VOCAB_FILES_NAMES _snake_case = PRETRAINED_VOCAB_FILES_MAP _snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : int="<s>" , _UpperCamelCase : Dict="</s>" , _UpperCamelCase : Dict="</s>" , _UpperCamelCase : str="<s>" , _UpperCamelCase : Optional[int]="<unk>" , _UpperCamelCase : str="<pad>" , _UpperCamelCase : List[Any]="<mask>" , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : Optional[int] , ) ->None: """simple docstring""" _lowerCamelCase : Optional[Any] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else mask_token _lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , cls_token=_UpperCamelCase , pad_token=_UpperCamelCase , mask_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , ) _lowerCamelCase : Dict = vocab_file _lowerCamelCase : List[str] = monolingual_vocab_file _lowerCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(_UpperCamelCase)) # Load the reduced vocab # Keep order of special tokens for backward compatibility _lowerCamelCase : Union[str, Any] = {} _lowerCamelCase : Dict = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(_UpperCamelCase) not in self.fairseq_tokens_to_ids: _lowerCamelCase : Dict = cnt cnt += 1 with open(_UpperCamelCase , """r""" , encoding="""utf-8""") as f: for line in f.readlines(): _lowerCamelCase : Optional[Any] = line.strip().split()[0] _lowerCamelCase : Optional[int] = len(self.fairseq_tokens_to_ids) if str(_UpperCamelCase) not in self.fairseq_tokens_to_ids: _lowerCamelCase : Dict = len(self.fairseq_tokens_to_ids) _lowerCamelCase : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Union[str, Any]) ->List[str]: """simple docstring""" _lowerCamelCase : Tuple = self.__dict__.copy() _lowerCamelCase : Optional[int] = None _lowerCamelCase : Optional[Any] = self.sp_model.serialized_model_proto() return state def __setstate__( self : int , _UpperCamelCase : str) ->List[Any]: """simple docstring""" _lowerCamelCase : Dict = d # for backward compatibility if not hasattr(self , """sp_model_kwargs"""): _lowerCamelCase : int = {} _lowerCamelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs) 
self.sp_model.LoadFromSerializedProto(self.sp_model_proto) def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None) ->List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _lowerCamelCase : List[str] = [self.cls_token_id] _lowerCamelCase : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False) ->List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase) if token_ids_a is None: return [1] + ([0] * len(_UpperCamelCase)) + [1] return [1] + ([0] * len(_UpperCamelCase)) + [1, 1] + ([0] * len(_UpperCamelCase)) + [1] def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None) ->List[int]: """simple docstring""" _lowerCamelCase : Union[str, Any] = [self.sep_token_id] _lowerCamelCase : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] @property def _SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]: """simple docstring""" return len(self.fairseq_ids_to_tokens) def _SCREAMING_SNAKE_CASE ( self : Dict) ->int: """simple docstring""" _lowerCamelCase : Any = {self.convert_ids_to_tokens(_UpperCamelCase): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : str) ->List[str]: """simple docstring""" return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : Union[str, Any]) ->List[Any]: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : List[str]) ->Union[str, Any]: """simple docstring""" return self.fairseq_ids_to_tokens[index] def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : Optional[Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Optional[Any] = """""".join(_UpperCamelCase).replace(_UpperCamelCase , """ """).strip() return out_string def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None) ->Tuple[str]: """simple docstring""" if not os.path.isdir(_UpperCamelCase): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""") return _lowerCamelCase : List[str] = os.path.join( _UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]) _lowerCamelCase : List[Any] = os.path.join( _UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , ) if os.path.abspath(self.vocab_file) != os.path.abspath(_UpperCamelCase) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , _UpperCamelCase) elif not os.path.isfile(self.vocab_file): with open(_UpperCamelCase , """wb""") as fi: _lowerCamelCase : str = self.sp_model.serialized_model_proto() fi.write(_UpperCamelCase) if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath( _UpperCamelCase) 
and os.path.isfile(self.monolingual_vocab_file): copyfile(self.monolingual_vocab_file , _UpperCamelCase) elif not os.path.isfile(self.monolingual_vocab_file): with open(_UpperCamelCase , """w""" , encoding="""utf-8""") as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(F"""{str(_UpperCamelCase)} \n""") return out_vocab_file, out_monolingual_vocab_file
705
from __future__ import annotations lowerCAmelCase : int =[] def A__ ( __A , __A , __A ): '''simple docstring''' for i in range(len(__A ) ): if board[row][i] == 1: return False for i in range(len(__A ) ): if board[i][column] == 1: return False for i, j in zip(range(__A , -1 , -1 ) , range(__A , -1 , -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(__A , -1 , -1 ) , range(__A , len(__A ) ) ): if board[i][j] == 1: return False return True def A__ ( __A , __A ): '''simple docstring''' if row >= len(__A ): solution.append(__A ) printboard(__A ) print() return True for i in range(len(__A ) ): if is_safe(__A , __A , __A ): _lowerCamelCase : int = 1 solve(__A , row + 1 ) _lowerCamelCase : List[str] = 0 return False def A__ ( __A ): '''simple docstring''' for i in range(len(__A ) ): for j in range(len(__A ) ): if board[i][j] == 1: print("""Q""" , end=""" """ ) else: print(""".""" , end=""" """ ) print() # n=int(input("The no. of queens")) lowerCAmelCase : int =8 lowerCAmelCase : Union[str, Any] =[[0 for i in range(n)] for j in range(n)] solve(board, 0) print("The total no. of solutions are :", len(solution))
15
0
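# Context sketch for the N-queens script above: `solve` enumerates placements by
# backtracking row by row, and for n = 8 it collects the classic 92 solutions.
# Below, a compact restatement of the safety test (illustrative names only;
# `cols[r]` is the column of the queen already placed in row r).
def queen_is_safe(cols: list[int], row: int, col: int) -> bool:
    return all(c != col and abs(c - col) != row - r for r, c in enumerate(cols))

assert queen_is_safe([1, 3], 2, 0)      # (2, 0) attacks neither (0, 1) nor (1, 3)
assert not queen_is_safe([1, 3], 2, 2)  # (2, 2) is diagonal to (1, 3)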
from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase : Optional[int] ={ "configuration_informer": [ "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "InformerConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Union[str, Any] =[ "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "InformerForPrediction", "InformerModel", "InformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys lowerCAmelCase : Optional[int] =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
706
import argparse import os import torch from transformers import ( XLNetConfig, XLNetForQuestionAnswering, XLNetForSequenceClassification, XLNetLMHeadModel, load_tf_weights_in_xlnet, ) from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging lowerCAmelCase : int ={ "cola": 2, "mnli": 3, "mrpc": 2, "sst-2": 2, "sts-b": 1, "qqp": 2, "qnli": 2, "rte": 2, "wnli": 2, } logging.set_verbosity_info() def A__ ( __A , __A , __A , __A=None ): '''simple docstring''' # Initialise PyTorch model _lowerCamelCase : Tuple = XLNetConfig.from_json_file(__A ) _lowerCamelCase : List[Any] = finetuning_task.lower() if finetuning_task is not None else """""" if finetuning_task in GLUE_TASKS_NUM_LABELS: print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" ) _lowerCamelCase : int = finetuning_task _lowerCamelCase : Union[str, Any] = GLUE_TASKS_NUM_LABELS[finetuning_task] _lowerCamelCase : int = XLNetForSequenceClassification(__A ) elif "squad" in finetuning_task: _lowerCamelCase : Dict = finetuning_task _lowerCamelCase : Optional[Any] = XLNetForQuestionAnswering(__A ) else: _lowerCamelCase : Any = XLNetLMHeadModel(__A ) # Load weights from tf checkpoint load_tf_weights_in_xlnet(__A , __A , __A ) # Save pytorch-model _lowerCamelCase : Optional[Any] = os.path.join(__A , __A ) _lowerCamelCase : Any = os.path.join(__A , __A ) print(F"""Save PyTorch model to {os.path.abspath(__A )}""" ) torch.save(model.state_dict() , __A ) print(F"""Save configuration file to {os.path.abspath(__A )}""" ) with open(__A , """w""" , encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": lowerCAmelCase : Dict =argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--xlnet_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained XLNet model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the folder to store the PyTorch model or dataset/vocab.", ) parser.add_argument( "--finetuning_task", default=None, type=str, help="Name of a task on which the XLNet TensorFlow model was fine-tuned", ) lowerCAmelCase : Union[str, Any] =parser.parse_args() print(args) convert_xlnet_checkpoint_to_pytorch( args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task )
15
0
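# Illustrative invocation of the XLNet conversion script above; the script
# filename and all paths are placeholders (assumptions for demonstration):
#
#   python convert_xlnet_checkpoint.py \
#       --tf_checkpoint_path ./xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet_pytorch \
#       --finetuning_task sts-b
#
# With --finetuning_task sts-b the script builds a sequence-classification head
# with a single regression label, per GLUE_TASKS_NUM_LABELS above.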
import glob import os import random from string import ascii_lowercase, digits import cva lowerCAmelCase : Any ="" lowerCAmelCase : Any ="" lowerCAmelCase : Optional[int] ="" lowerCAmelCase : List[str] =1 # (0 is vertical, 1 is horizontal) def A__ ( ): _lowerCamelCase , _lowerCamelCase : Optional[Any] = get_dataset(__UpperCamelCase , __UpperCamelCase ) print("""Processing...""" ) _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = update_image_and_anno(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) for index, image in enumerate(__UpperCamelCase ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' _lowerCamelCase : int = random_chars(32 ) _lowerCamelCase : str = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0] _lowerCamelCase : Dict = F"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}""" cva.imwrite(F"""/{file_root}.jpg""" , __UpperCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(F"""Success {index+1}/{len(__UpperCamelCase )} with {file_name}""" ) _lowerCamelCase : Tuple = [] for anno in new_annos[index]: _lowerCamelCase : Tuple = F"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}""" annos_list.append(__UpperCamelCase ) with open(F"""/{file_root}.txt""" , """w""" ) as outfile: outfile.write("""\n""".join(line for line in annos_list ) ) def A__ ( __A , __A ): _lowerCamelCase : Any = [] _lowerCamelCase : Union[str, Any] = [] for label_file in glob.glob(os.path.join(__UpperCamelCase , """*.txt""" ) ): _lowerCamelCase : Union[str, Any] = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0] with open(__UpperCamelCase ) as in_file: _lowerCamelCase : Union[str, Any] = in_file.readlines() _lowerCamelCase : Dict = os.path.join(__UpperCamelCase , F"""{label_name}.jpg""" ) _lowerCamelCase : List[str] = [] for obj_list in obj_lists: _lowerCamelCase : List[Any] = obj_list.rstrip("""\n""" ).split(""" """ ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(__UpperCamelCase ) labels.append(__UpperCamelCase ) return img_paths, labels def A__ ( __A , __A , __A = 1 ): _lowerCamelCase : List[Any] = [] _lowerCamelCase : Dict = [] _lowerCamelCase : Any = [] for idx in range(len(__UpperCamelCase ) ): _lowerCamelCase : str = [] _lowerCamelCase : Dict = img_list[idx] path_list.append(__UpperCamelCase ) _lowerCamelCase : List[str] = anno_list[idx] _lowerCamelCase : List[str] = cva.imread(__UpperCamelCase ) if flip_type == 1: _lowerCamelCase : List[Any] = cva.flip(__UpperCamelCase , __UpperCamelCase ) for bbox in img_annos: _lowerCamelCase : Optional[int] = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: _lowerCamelCase : int = cva.flip(__UpperCamelCase , __UpperCamelCase ) for bbox in img_annos: _lowerCamelCase : List[Any] = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(__UpperCamelCase ) new_imgs_list.append(__UpperCamelCase ) return new_imgs_list, new_annos_lists, path_list def A__ ( __A = 32 ): assert number_char > 1, "The number of character should greater than 1" _lowerCamelCase : Union[str, Any] = ascii_lowercase + digits return "".join(random.choice(__UpperCamelCase ) for _ in range(__UpperCamelCase ) ) if __name__ == "__main__": main() print("DONE ✅")
707
def A__ ( __A ): '''simple docstring''' _lowerCamelCase : Tuple = 0 for ch in input_str: _lowerCamelCase : Optional[Any] = ord(__A ) _lowerCamelCase : List[str] = pow(2 , __A ) # If we already turned on bit for current character's unicode if bitmap >> ch_unicode & 1 == 1: return False bitmap |= ch_bit_index_on return True if __name__ == "__main__": import doctest doctest.testmod()
15
0
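# Worked example of the bitmap trick used above: each character owns one bit of
# an arbitrary-precision int, so a repeat is caught by `bitmap >> code & 1`.
bitmap = 0
for ch in "abc":
    bit = 1 << ord(ch)
    assert bitmap & bit == 0  # not seen before
    bitmap |= bit
assert (bitmap >> ord("a")) & 1 == 1  # 'a' is now recorded as seen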
'''simple docstring''' import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class __snake_case : '''simple docstring''' def __init__( self : int , _UpperCamelCase : Dict , _UpperCamelCase : Any=None , _UpperCamelCase : Tuple=None , _UpperCamelCase : Any=None , _UpperCamelCase : int="resnet50" , _UpperCamelCase : Optional[int]=3 , _UpperCamelCase : Optional[int]=32 , _UpperCamelCase : Optional[Any]=3 , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : Union[str, Any]=True , ) ->Optional[int]: """simple docstring""" _lowerCamelCase : int = parent _lowerCamelCase : str = out_indices if out_indices is not None else [4] _lowerCamelCase : Optional[int] = stage_names _lowerCamelCase : int = out_features _lowerCamelCase : Optional[int] = backbone _lowerCamelCase : Any = batch_size _lowerCamelCase : List[str] = image_size _lowerCamelCase : List[Any] = num_channels _lowerCamelCase : Any = use_pretrained_backbone _lowerCamelCase : Optional[int] = is_training def _SCREAMING_SNAKE_CASE ( self : List[str]) ->int: """simple docstring""" _lowerCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _lowerCamelCase : Optional[Any] = self.get_config() return config, pixel_values def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : str , _UpperCamelCase : int) ->List[str]: """simple docstring""" _lowerCamelCase : str = TimmBackbone(config=_lowerCAmelCase) model.to(_lowerCAmelCase) model.eval() with torch.no_grad(): _lowerCamelCase : Dict = model(_lowerCAmelCase) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[Any]: """simple docstring""" _lowerCamelCase : Any = self.prepare_config_and_inputs() _lowerCamelCase , _lowerCamelCase : str = config_and_inputs _lowerCamelCase : Union[str, Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch @require_timm class __snake_case ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _snake_case = (TimmBackbone,) if is_torch_available() else () _snake_case = {'feature-extraction': TimmBackbone} if is_torch_available() else {} _snake_case = False _snake_case = False _snake_case = False _snake_case = False def _SCREAMING_SNAKE_CASE ( self : str) ->List[str]: """simple docstring""" _lowerCamelCase : Union[str, Any] = TimmBackboneModelTester(self) _lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase) def _SCREAMING_SNAKE_CASE ( self : str) ->List[str]: """simple docstring""" 
self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _SCREAMING_SNAKE_CASE ( self : str) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : str = """resnet18""" _lowerCamelCase : List[Any] = """microsoft/resnet-18""" _lowerCamelCase : int = AutoBackbone.from_pretrained(_lowerCAmelCase , use_timm_backbone=_lowerCAmelCase) _lowerCamelCase : int = AutoBackbone.from_pretrained(_lowerCAmelCase) self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features)) self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names)) self.assertEqual(timm_model.channels , transformers_model.channels) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,)) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1]) _lowerCamelCase : Union[str, Any] = AutoBackbone.from_pretrained(_lowerCAmelCase , use_timm_backbone=_lowerCAmelCase , out_indices=[1, 2, 3]) _lowerCamelCase : str = AutoBackbone.from_pretrained(_lowerCAmelCase , out_indices=[1, 2, 3]) self.assertEqual(timm_model.out_indices , transformers_model.out_indices) self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features)) self.assertEqual(timm_model.channels , transformers_model.channels) @unittest.skip("""TimmBackbone doesn't support feed forward chunking""") def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict: """simple docstring""" pass @unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""") def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]: """simple docstring""" pass @unittest.skip("""TimmBackbone initialization is managed on the timm side""") def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[Any]: """simple docstring""" pass @unittest.skip("""TimmBackbone models doesn't have inputs_embeds""") def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: """simple docstring""" pass @unittest.skip("""TimmBackbone models doesn't have inputs_embeds""") def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]: """simple docstring""" pass @unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""") def _SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]: """simple docstring""" pass @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""") def _SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]: """simple docstring""" pass @unittest.skip("""model weights aren't tied in TimmBackbone.""") def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Union[str, Any]: """simple docstring""" pass @unittest.skip("""model weights aren't tied in TimmBackbone.""") def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str: """simple docstring""" pass @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""") def _SCREAMING_SNAKE_CASE ( self : Dict) ->str: """simple docstring""" pass @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""") def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->str: 
"""simple docstring""" pass @unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""") def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any: """simple docstring""" pass @unittest.skip("""TimmBackbone doesn't support output_attentions.""") def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" pass @unittest.skip("""Safetensors is not supported by timm.""") def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]: """simple docstring""" pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""") def _SCREAMING_SNAKE_CASE ( self : Tuple) ->int: """simple docstring""" pass def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[int]: """simple docstring""" _lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : List[Any] = model_class(_lowerCAmelCase) _lowerCamelCase : int = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : Tuple = [*signature.parameters.keys()] _lowerCamelCase : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _lowerCAmelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->str: """simple docstring""" _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase : List[Any] = True _lowerCamelCase : List[str] = self.has_attentions # no need to test all models as different heads yield the same functionality _lowerCamelCase : List[str] = self.all_model_classes[0] _lowerCamelCase : Optional[int] = model_class(_lowerCAmelCase) model.to(_lowerCAmelCase) _lowerCamelCase : Optional[Any] = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase) _lowerCamelCase : List[str] = model(**_lowerCAmelCase) _lowerCamelCase : List[Any] = outputs[0][-1] # Encoder-/Decoder-only models _lowerCamelCase : Union[str, Any] = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: _lowerCamelCase : Optional[Any] = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=_lowerCAmelCase) self.assertIsNotNone(hidden_states.grad) if self.has_attentions: self.assertIsNotNone(attentions.grad) def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple: """simple docstring""" _lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Optional[Any] = model_class(_lowerCAmelCase) model.to(_lowerCAmelCase) model.eval() _lowerCamelCase : int = model(**_lowerCAmelCase) self.assertEqual(len(result.feature_maps) , len(config.out_indices)) self.assertEqual(len(model.channels) , len(config.out_indices)) # Check output of last stage is taken if out_features=None, out_indices=None _lowerCamelCase : Dict = copy.deepcopy(_lowerCAmelCase) _lowerCamelCase : Dict = None _lowerCamelCase : Any = model_class(_lowerCAmelCase) model.to(_lowerCAmelCase) model.eval() _lowerCamelCase : int = model(**_lowerCAmelCase) self.assertEqual(len(result.feature_maps) , 1) self.assertEqual(len(model.channels) , 1) # Check backbone can be initialized with fresh weights _lowerCamelCase : Optional[int] = copy.deepcopy(_lowerCAmelCase) _lowerCamelCase : Dict = False _lowerCamelCase : int = model_class(_lowerCAmelCase) model.to(_lowerCAmelCase) model.eval() _lowerCamelCase : Optional[Any] = model(**_lowerCAmelCase)
708
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class __snake_case ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Tuple) ->int: """simple docstring""" _lowerCamelCase : Tuple = XLMRobertaModel.from_pretrained("""xlm-roberta-base""") _lowerCamelCase : Optional[int] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]]) # The dog is cute and lives in the garden house _lowerCamelCase : Optional[Any] = torch.Size((1, 12, 768)) # batch_size, sequence_length, embedding_vector_dim _lowerCamelCase : str = torch.tensor( [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]]) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): _lowerCamelCase : List[str] = model(_UpperCamelCase)["""last_hidden_state"""].detach() self.assertEqual(output.shape , _UpperCamelCase) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1E-3)) @slow def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : List[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""") _lowerCamelCase : Optional[Any] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]]) # The dog is cute and lives in the garden house _lowerCamelCase : str = torch.Size((1, 12, 1024)) # batch_size, sequence_length, embedding_vector_dim _lowerCamelCase : Union[str, Any] = torch.tensor( [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]]) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): _lowerCamelCase : int = model(_UpperCamelCase)["""last_hidden_state"""].detach() self.assertEqual(output.shape , _UpperCamelCase) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1E-3))
15
0
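# Minimal sketch mirroring the XLM-R integration test above; assumes network
# access and the `xlm-roberta-base` checkpoint (run under @slow in practice).
import torch
from transformers import XLMRobertaModel

model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
input_ids = torch.tensor([[0, 581, 2]])  # <s> + one token + </s>, illustrative ids
with torch.no_grad():
    last_hidden = model(input_ids)["last_hidden_state"]
print(last_hidden.shape)  # torch.Size([1, 3, 768]) for the base model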
from datetime import datetime as dt import os from github import Github lowerCAmelCase : int =[ "good first issue", "good second issue", "good difficult issue", "feature request", "new model", "wip", ] def A__ ( ): '''simple docstring''' _lowerCamelCase : int = Github(os.environ["""GITHUB_TOKEN"""] ) _lowerCamelCase : Tuple = g.get_repo("""huggingface/transformers""" ) _lowerCamelCase : Dict = repo.get_issues(state="""open""" ) for issue in open_issues: _lowerCamelCase : Optional[Any] = sorted([comment for comment in issue.get_comments()] , key=lambda __A : __A.created_at , reverse=snake_case_ ) _lowerCamelCase : Dict = comments[0] if len(snake_case_ ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.") issue.edit(state="""closed""" ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would add stale comment to {issue.number}") issue.create_comment( """This issue has been automatically marked as stale because it has not had """ """recent activity. If you think this still needs to be addressed """ """please comment on this thread.\n\nPlease note that issues that do not follow the """ """[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """ """are likely to be ignored.""" ) if __name__ == "__main__": main()
709
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase : Tuple =logging.get_logger(__name__) class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = ['pixel_values'] def __init__( self : Optional[Any] , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Union[int, float] = 1 / 255 , _UpperCamelCase : bool = True , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , **_UpperCamelCase : str , ) ->None: """simple docstring""" super().__init__(**_UpperCamelCase) _lowerCamelCase : Tuple = size if size is not None else {"""height""": 256, """width""": 256} _lowerCamelCase : Optional[Any] = get_size_dict(_UpperCamelCase) _lowerCamelCase : Any = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} _lowerCamelCase : Any = get_size_dict(_UpperCamelCase , param_name="""crop_size""") _lowerCamelCase : int = do_resize _lowerCamelCase : int = size _lowerCamelCase : Optional[int] = resample _lowerCamelCase : int = do_center_crop _lowerCamelCase : Optional[Any] = crop_size _lowerCamelCase : Union[str, Any] = do_rescale _lowerCamelCase : List[str] = rescale_factor _lowerCamelCase : List[Any] = do_normalize _lowerCamelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray: """simple docstring""" _lowerCamelCase : Dict = get_size_dict(_UpperCamelCase) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""") return resize( _UpperCamelCase , size=(size["""height"""], size["""width"""]) , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : List[str] , ) ->np.ndarray: """simple docstring""" _lowerCamelCase : Union[str, Any] = get_size_dict(_UpperCamelCase) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. 
Got {size.keys()}""") return center_crop(_UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[int, float] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->str: """simple docstring""" return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray: """simple docstring""" return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : ImageInput , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Tuple=None , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : bool = None , _UpperCamelCase : float = None , _UpperCamelCase : bool = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , _UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCamelCase : List[Any] , ) ->PIL.Image.Image: """simple docstring""" _lowerCamelCase : Any = do_resize if do_resize is not None else self.do_resize _lowerCamelCase : List[str] = resample if resample is not None else self.resample _lowerCamelCase : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop _lowerCamelCase : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale _lowerCamelCase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCamelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize _lowerCamelCase : int = image_mean if image_mean is not None else self.image_mean _lowerCamelCase : Dict = image_std if image_std is not None else self.image_std _lowerCamelCase : Optional[Any] = size if size is not None else self.size _lowerCamelCase : Optional[int] = get_size_dict(_UpperCamelCase) _lowerCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size _lowerCamelCase : Dict = get_size_dict(_UpperCamelCase , param_name="""crop_size""") _lowerCamelCase : int = make_list_of_images(_UpperCamelCase) if not valid_images(_UpperCamelCase): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""") if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""") if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""") if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""") if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""") # All transformations expect numpy arrays. 
_lowerCamelCase : Union[str, Any] = [to_numpy_array(_UpperCamelCase) for image in images] if do_resize: _lowerCamelCase : Any = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase) for image in images] if do_center_crop: _lowerCamelCase : str = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase) for image in images] if do_rescale: _lowerCamelCase : Optional[int] = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase) for image in images] if do_normalize: _lowerCamelCase : List[str] = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase) for image in images] _lowerCamelCase : List[str] = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase) for image in images] _lowerCamelCase : int = {"""pixel_values""": images} return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase)
15
0
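# Hedged usage sketch for the image processor above; instantiation via its
# obfuscated class name is sketched generically, and the PIL input is synthetic.
import numpy as np
from PIL import Image

image = Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))
# processor = <image processor class above>()          # defaults: resize 256, crop 224
# batch = processor(images=image, return_tensors="np")
# batch["pixel_values"].shape                          # -> (1, 3, 224, 224)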
import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class __snake_case ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): '''simple docstring''' _snake_case = BertJapaneseTokenizer _snake_case = False _snake_case = True def _SCREAMING_SNAKE_CASE ( self : Any) ->Tuple: """simple docstring""" super().setUp() _lowerCamelCase : Dict = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''', '''世界''', '''##世界''', '''、''', '''##、''', '''。''', '''##。''', ] _lowerCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens])) def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Union[str, Any]) ->List[str]: """simple docstring""" _lowerCamelCase : Any = '''こんにちは、世界。 \nこんばんは、世界。''' _lowerCamelCase : Dict = '''こんにちは 、 世界 。 こんばんは 、 世界 。''' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : Any) ->Tuple: """simple docstring""" _lowerCamelCase : Dict = self.get_input_output_texts(UpperCamelCase__) _lowerCamelCase : List[str] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__) _lowerCamelCase : Dict = tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__) return text, ids def _SCREAMING_SNAKE_CASE ( self : Dict) ->List[str]: """simple docstring""" pass # TODO add if relevant def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]: """simple docstring""" pass # TODO add if relevant def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" pass # TODO add if relevant def _SCREAMING_SNAKE_CASE ( self : Any) ->Any: """simple docstring""" _lowerCamelCase : int = self.tokenizer_class(self.vocab_file) _lowerCamelCase : Any = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""") self.assertListEqual(UpperCamelCase__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""]) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__) , [3, 12, 10, 14, 4, 9, 12, 10, 14]) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : Any = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""") self.assertIsNotNone(UpperCamelCase__) _lowerCamelCase : Dict = '''こんにちは、世界。\nこんばんは、世界。''' _lowerCamelCase : Dict = tokenizer.tokenize(UpperCamelCase__) self.assertListEqual(UpperCamelCase__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""]) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__) , [3, 12, 10, 14, 4, 9, 12, 10, 14]) _lowerCamelCase : Optional[int] = os.path.join(self.tmpdirname , """tokenizer.bin""") with open(UpperCamelCase__ , """wb""") as handle: pickle.dump(UpperCamelCase__ , UpperCamelCase__) with open(UpperCamelCase__ , """rb""") as handle: _lowerCamelCase : Tuple = pickle.load(UpperCamelCase__) _lowerCamelCase : str 
= tokenizer_new.tokenize(UpperCamelCase__) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Dict = MecabTokenizer(mecab_dic="""ipadic""") self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[int]: """simple docstring""" try: _lowerCamelCase : Tuple = MecabTokenizer(mecab_dic="""unidic_lite""") except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , ) def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]: """simple docstring""" try: _lowerCamelCase : Optional[int] = MecabTokenizer(mecab_dic="""unidic""") except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , ) def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]: """simple docstring""" _lowerCamelCase : Tuple = MecabTokenizer(do_lower_case=UpperCamelCase__ , mecab_dic="""ipadic""") self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]: """simple docstring""" try: _lowerCamelCase : Dict = MecabTokenizer( do_lower_case=UpperCamelCase__ , normalize_text=UpperCamelCase__ , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""") except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. 
return self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , ) def _SCREAMING_SNAKE_CASE ( self : Dict) ->Any: """simple docstring""" _lowerCamelCase : Optional[Any] = MecabTokenizer(normalize_text=UpperCamelCase__ , mecab_dic="""ipadic""") self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , ) @require_sudachi def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[Any]: """simple docstring""" _lowerCamelCase : Any = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""") self.assertIsNotNone(UpperCamelCase__) _lowerCamelCase : int = '''こんにちは、世界。\nこんばんは、世界。''' _lowerCamelCase : Any = tokenizer.tokenize(UpperCamelCase__) self.assertListEqual(UpperCamelCase__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""]) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__) , [3, 12, 10, 14, 4, 9, 12, 10, 14]) _lowerCamelCase : str = os.path.join(self.tmpdirname , """tokenizer.bin""") with open(UpperCamelCase__ , """wb""") as handle: pickle.dump(UpperCamelCase__ , UpperCamelCase__) with open(UpperCamelCase__ , """rb""") as handle: _lowerCamelCase : Optional[int] = pickle.load(UpperCamelCase__) _lowerCamelCase : Any = tokenizer_new.tokenize(UpperCamelCase__) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__) @require_sudachi def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Tuple: """simple docstring""" _lowerCamelCase : str = SudachiTokenizer(sudachi_dict_type="""core""") self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , ) @require_sudachi def _SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Optional[Any] = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""") self.assertListEqual(tokenizer.tokenize("""外国人参政権""") , ["""外国""", """人""", """参政""", """権"""]) @require_sudachi def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Dict: """simple docstring""" _lowerCamelCase : Tuple = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""") self.assertListEqual(tokenizer.tokenize("""外国人参政権""") , ["""外国人""", """参政権"""]) @require_sudachi def _SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : str = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""") self.assertListEqual(tokenizer.tokenize("""外国人参政権""") , ["""外国人参政権"""]) @require_sudachi def _SCREAMING_SNAKE_CASE ( self : Any) ->int: """simple docstring""" _lowerCamelCase : Optional[int] = SudachiTokenizer(do_lower_case=UpperCamelCase__ , sudachi_dict_type="""core""") self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , ) @require_sudachi def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int: """simple docstring""" _lowerCamelCase : Optional[int] = SudachiTokenizer(normalize_text=UpperCamelCase__ , 
sudachi_dict_type="""core""") self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , ) @require_sudachi def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]: """simple docstring""" _lowerCamelCase : Optional[int] = SudachiTokenizer(trim_whitespace=UpperCamelCase__ , sudachi_dict_type="""core""") self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , ) @require_jumanpp def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" _lowerCamelCase : Tuple = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""") self.assertIsNotNone(UpperCamelCase__) _lowerCamelCase : Dict = '''こんにちは、世界。\nこんばんは、世界。''' _lowerCamelCase : List[str] = tokenizer.tokenize(UpperCamelCase__) self.assertListEqual(UpperCamelCase__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""]) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__) , [3, 12, 10, 14, 4, 9, 12, 10, 14]) _lowerCamelCase : Dict = os.path.join(self.tmpdirname , """tokenizer.bin""") with open(UpperCamelCase__ , """wb""") as handle: pickle.dump(UpperCamelCase__ , UpperCamelCase__) with open(UpperCamelCase__ , """rb""") as handle: _lowerCamelCase : Dict = pickle.load(UpperCamelCase__) _lowerCamelCase : Any = tokenizer_new.tokenize(UpperCamelCase__) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__) @require_jumanpp def _SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]: """simple docstring""" _lowerCamelCase : Any = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , ) @require_jumanpp def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Tuple: """simple docstring""" _lowerCamelCase : Union[str, Any] = JumanppTokenizer(do_lower_case=UpperCamelCase__) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , ) @require_jumanpp def _SCREAMING_SNAKE_CASE ( self : Any) ->Any: """simple docstring""" _lowerCamelCase : int = JumanppTokenizer(normalize_text=UpperCamelCase__) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , ) @require_jumanpp def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Dict = JumanppTokenizer(trim_whitespace=UpperCamelCase__) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , ) @require_jumanpp def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]: """simple docstring""" _lowerCamelCase : str = JumanppTokenizer() self.assertListEqual( 
tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""") , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int: """simple docstring""" _lowerCamelCase : Tuple = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは'''] _lowerCamelCase : str = {} for i, token in enumerate(UpperCamelCase__): _lowerCamelCase : str = i _lowerCamelCase : Union[str, Any] = WordpieceTokenizer(vocab=UpperCamelCase__ , unk_token="""[UNK]""") self.assertListEqual(tokenizer.tokenize("""""") , []) self.assertListEqual(tokenizer.tokenize("""こんにちは""") , ["""こんにちは"""]) self.assertListEqual(tokenizer.tokenize("""こんばんは""") , ["""こん""", """##ばんは"""]) self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""") , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""]) def _SCREAMING_SNAKE_CASE ( self : str) ->int: """simple docstring""" _lowerCamelCase : Optional[Any] = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""") _lowerCamelCase : str = tokenizer.subword_tokenizer _lowerCamelCase : Tuple = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""") self.assertListEqual(UpperCamelCase__ , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""]) _lowerCamelCase : Tuple = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""") self.assertListEqual(UpperCamelCase__ , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""]) def _SCREAMING_SNAKE_CASE ( self : int) ->str: """simple docstring""" _lowerCamelCase : Dict = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""") _lowerCamelCase : Optional[int] = tokenizer.encode("""ありがとう。""" , add_special_tokens=UpperCamelCase__) _lowerCamelCase : Any = tokenizer.encode("""どういたしまして。""" , add_special_tokens=UpperCamelCase__) _lowerCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__) _lowerCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class __snake_case ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): '''simple docstring''' _snake_case = BertJapaneseTokenizer _snake_case = False def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]: """simple docstring""" super().setUp() _lowerCamelCase : Optional[Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] _lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens])) def _SCREAMING_SNAKE_CASE ( self : str , **_UpperCamelCase : str) ->Dict: """simple docstring""" return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="""character""" , **UpperCamelCase__) def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : List[str]) ->Tuple: """simple docstring""" _lowerCamelCase : int = '''こんにちは、世界。 \nこんばんは、世界。''' _lowerCamelCase : Dict = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。''' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Any: 
"""simple docstring""" pass # TODO add if relevant def _SCREAMING_SNAKE_CASE ( self : Any) ->int: """simple docstring""" pass # TODO add if relevant def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict: """simple docstring""" pass # TODO add if relevant def _SCREAMING_SNAKE_CASE ( self : Tuple) ->str: """simple docstring""" _lowerCamelCase : Tuple = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""") _lowerCamelCase : str = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""") self.assertListEqual( UpperCamelCase__ , ["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""]) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase__) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]: """simple docstring""" _lowerCamelCase : List[str] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] _lowerCamelCase : Optional[int] = {} for i, token in enumerate(UpperCamelCase__): _lowerCamelCase : Optional[Any] = i _lowerCamelCase : Dict = CharacterTokenizer(vocab=UpperCamelCase__ , unk_token="""[UNK]""") self.assertListEqual(tokenizer.tokenize("""""") , []) self.assertListEqual(tokenizer.tokenize("""こんにちは""") , ["""こ""", """ん""", """に""", """ち""", """は"""]) self.assertListEqual(tokenizer.tokenize("""こんにちほ""") , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""]) def _SCREAMING_SNAKE_CASE ( self : str) ->Dict: """simple docstring""" _lowerCamelCase : List[Any] = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""") _lowerCamelCase : Any = tokenizer.encode("""ありがとう。""" , add_special_tokens=UpperCamelCase__) _lowerCamelCase : str = tokenizer.encode("""どういたしまして。""" , add_special_tokens=UpperCamelCase__) _lowerCamelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__) _lowerCamelCase : int = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : int) ->int: """simple docstring""" _lowerCamelCase : Tuple = '''cl-tohoku/bert-base-japanese''' _lowerCamelCase : str = AutoTokenizer.from_pretrained(UpperCamelCase__) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__) class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[Any]: """simple docstring""" _lowerCamelCase : str = '''cl-tohoku/bert-base-japanese''' with self.assertLogs("""transformers""" , level="""WARNING""") as cm: BertTokenizer.from_pretrained(UpperCamelCase__) self.assertTrue( cm.records[0].message.startswith( """The tokenizer class you load from this checkpoint is not the same type as the class this function""" """ is called from.""")) _lowerCamelCase : Any = '''bert-base-cased''' with self.assertLogs("""transformers""" , level="""WARNING""") as cm: BertJapaneseTokenizer.from_pretrained(UpperCamelCase__) self.assertTrue( cm.records[0].message.startswith( """The tokenizer class you load from this checkpoint is not the same type as the class this function""" """ is called from."""))
710
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for the input sample x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
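Anything with a process(sample) method satisfies the Protocol; a minimal sketch using an identity filter (flat 0 dB magnitude response), assuming the function names restored above:

# Sketch only: the identity filter passes the impulse through unchanged,
# so the FFT magnitude is 1 at every bin and the plotted gain is 0 dB.
class IdentityFilter:
    def process(self, sample: float) -> float:
        return sample

show_frequency_response(IdentityFilter(), samplerate=48000)
show_phase_response(IdentityFilter(), samplerate=48000)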
15
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
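As a sketch of what these defaults imply (the arithmetic below is ours, not code from the source): the [2, 16, 16] tubelet embedding tiles a 32-frame, 224x224 clip into non-overlapping 3D patches, one token each.

# Token count implied by the default config values above.
num_frames, image_size = 32, 224
t, h, w = 2, 16, 16  # tubelet_size: (time, height, width)
num_patches = (num_frames // t) * (image_size // h) * (image_size // w)
assert num_patches == 16 * 14 * 14 == 3136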
711
import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL lowerCAmelCase : Tuple =version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11") def A__ ( __A , __A , __A , __A , __A , __A , __A , __A=False , ): '''simple docstring''' output_path.parent.mkdir(parents=__A , exist_ok=__A ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( __A , __A , f=output_path.as_posix() , input_names=__A , output_names=__A , dynamic_axes=__A , do_constant_folding=__A , use_external_data_format=__A , enable_onnx_checker=__A , opset_version=__A , ) else: export( __A , __A , f=output_path.as_posix() , input_names=__A , output_names=__A , dynamic_axes=__A , do_constant_folding=__A , opset_version=__A , ) @torch.no_grad() def A__ ( __A , __A , __A , __A = False ): '''simple docstring''' _lowerCamelCase : Tuple = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): _lowerCamelCase : str = """cuda""" elif fpaa and not torch.cuda.is_available(): raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" ) else: _lowerCamelCase : List[str] = """cpu""" _lowerCamelCase : Dict = Path(__A ) # VAE DECODER _lowerCamelCase : Optional[Any] = AutoencoderKL.from_pretrained(model_path + """/vae""" ) _lowerCamelCase : List[str] = vae_decoder.config.latent_channels # forward only through the decoder part _lowerCamelCase : Tuple = vae_decoder.decode onnx_export( __A , model_args=( torch.randn(1 , __A , 25 , 25 ).to(device=__A , dtype=__A ), False, ) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={ """latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""}, } , opset=__A , ) del vae_decoder if __name__ == "__main__": lowerCAmelCase : Optional[int] =argparse.ArgumentParser() parser.add_argument( "--model_path", type=str, required=True, help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", ) parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") parser.add_argument( "--opset", default=14, type=int, help="The version of the ONNX operator set to use.", ) parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") lowerCAmelCase : Optional[Any] =parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print("SD: Done: ONNX")
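A minimal sketch of driving the exported decoder with onnxruntime. The model path and the 4-channel, 25x25 latent are assumptions (4 latent channels is typical for Stable Diffusion VAEs); the input/output names match the ordered_input_names and output_names passed to onnx_export above.

import numpy as np
import onnxruntime as ort

# Load the exported decoder; the path is illustrative.
sess = ort.InferenceSession("sd_onnx/vae_decoder/model.onnx")
latent = np.random.randn(1, 4, 25, 25).astype(np.float32)
# "latent_sample" / "sample" are the names used in the export call above.
(sample,) = sess.run(["sample"], {"latent_sample": latent})
print(sample.shape)  # e.g. (1, 3, 200, 200) for an 8x upsampling decoder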
15
0
import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class __snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] = True , _UpperCamelCase : Any = None , _UpperCamelCase : Any = 32 , _UpperCamelCase : Optional[int] = True , _UpperCamelCase : Union[str, Any] = 1 / 255 , _UpperCamelCase : Union[str, Any] = True , _UpperCamelCase : List[Any] = True , _UpperCamelCase : List[Any] = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , _UpperCamelCase : Tuple = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , _UpperCamelCase : Optional[int] = True , _UpperCamelCase : List[str]=7 , _UpperCamelCase : int=30 , _UpperCamelCase : Any=400 , _UpperCamelCase : Dict=3 , ) ->Any: """simple docstring""" _lowerCamelCase : Union[str, Any] = parent _lowerCamelCase : str = do_resize _lowerCamelCase : str = size if size is not None else {"shortest_edge": 288} _lowerCamelCase : Tuple = size_divisor _lowerCamelCase : Optional[int] = do_rescale _lowerCamelCase : Tuple = rescale_factor _lowerCamelCase : List[Any] = do_normalize _lowerCamelCase : List[str] = do_center_crop _lowerCamelCase : List[Any] = image_mean _lowerCamelCase : Optional[int] = image_std _lowerCamelCase : Any = do_pad _lowerCamelCase : List[str] = batch_size _lowerCamelCase : Tuple = num_channels _lowerCamelCase : List[str] = min_resolution _lowerCamelCase : Union[str, Any] = max_resolution def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]: """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple=False) ->Optional[int]: """simple docstring""" if not batched: _lowerCamelCase : int = self.size["shortest_edge"] _lowerCamelCase : List[str] = image_inputs[0] if isinstance(_UpperCamelCase , Image.Image): _lowerCamelCase : Optional[Any] = image.size else: _lowerCamelCase : Dict = image.shape[1], image.shape[2] _lowerCamelCase : Any = size / min(_UpperCamelCase , _UpperCamelCase) if h < w: _lowerCamelCase : Union[str, Any] = size, scale * w else: _lowerCamelCase : Dict = scale * h, size _lowerCamelCase : Union[str, Any] = int((1333 / 800) * size) if max(_UpperCamelCase , _UpperCamelCase) > max_size: _lowerCamelCase : int = max_size / max(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Tuple = newh * scale _lowerCamelCase : List[Any] = neww * scale _lowerCamelCase : Optional[int] = int(newh + 0.5), int(neww + 0.5) _lowerCamelCase : Dict = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: _lowerCamelCase : Union[str, Any] = [] for image in image_inputs: _lowerCamelCase : Optional[Any] = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) _lowerCamelCase : Tuple = max(_UpperCamelCase , key=lambda _UpperCamelCase: item[0])[0] _lowerCamelCase : List[Any] = max(_UpperCamelCase , key=lambda 
_UpperCamelCase: item[1])[1] return expected_height, expected_width @require_torch @require_vision class __snake_case ( _A , unittest.TestCase ): '''simple docstring''' _snake_case = BridgeTowerImageProcessor if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : Any) ->List[str]: """simple docstring""" _lowerCamelCase : str = BridgeTowerImageProcessingTester(self) @property def _SCREAMING_SNAKE_CASE ( self : str) ->List[Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[Any]: """simple docstring""" _lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(_UpperCamelCase , """image_mean""")) self.assertTrue(hasattr(_UpperCamelCase , """image_std""")) self.assertTrue(hasattr(_UpperCamelCase , """do_normalize""")) self.assertTrue(hasattr(_UpperCamelCase , """do_resize""")) self.assertTrue(hasattr(_UpperCamelCase , """size""")) self.assertTrue(hasattr(_UpperCamelCase , """size_divisor""")) def _SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]: """simple docstring""" pass def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images _lowerCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase) for image in image_inputs: self.assertIsInstance(_UpperCamelCase , Image.Image) # Test not batched input _lowerCamelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values _lowerCamelCase : List[Any] = self.image_processor_tester.get_expected_values(_UpperCamelCase) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _lowerCamelCase : Optional[Any] = image_processing(_UpperCamelCase , return_tensors="""pt""").pixel_values _lowerCamelCase : List[Any] = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" _lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors _lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase) for image in image_inputs: self.assertIsInstance(_UpperCamelCase , np.ndarray) # Test not batched input _lowerCamelCase : str = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values _lowerCamelCase : int = self.image_processor_tester.get_expected_values(_UpperCamelCase) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _lowerCamelCase : Optional[int] = image_processing(_UpperCamelCase , return_tensors="""pt""").pixel_values _lowerCamelCase : List[Any] = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _SCREAMING_SNAKE_CASE ( self : str) ->List[str]: """simple docstring""" 
_lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors _lowerCamelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase) for image in image_inputs: self.assertIsInstance(_UpperCamelCase , torch.Tensor) # Test not batched input _lowerCamelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values _lowerCamelCase : List[str] = self.image_processor_tester.get_expected_values(_UpperCamelCase) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _lowerCamelCase : str = image_processing(_UpperCamelCase , return_tensors="""pt""").pixel_values _lowerCamelCase : List[str] = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
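A pure-function sketch of the expected-size computation used by the tester above; the helper name is ours, and the logic mirrors get_expected_values for the non-batched case.

def expected_size(h: int, w: int, size: int = 288, size_divisor: int = 32) -> tuple:
    # Scale the shortest edge to `size`.
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    # Cap the longest edge at int((1333 / 800) * size).
    max_size = int((1333 / 800) * size)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    # Floor both dims to multiples of `size_divisor`.
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

assert expected_size(400, 300) == (384, 288)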
712
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """Calculate the built-in voltage of a p-n junction diode."""
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration"
        )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration"
        )
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
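A worked call with illustrative silicon-like concentrations (cm^-3, assumed example values, not from the source): Vbi = kT/q * ln(Nd * Na / ni^2).

# With Nd = Na = 1e17 and ni = 1.5e10 at T = 300 K, kT/q is about
# 0.0259 V and ln(1e34 / 2.25e20) is about 31.4, so Vbi is roughly 0.81 V.
print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10))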
15
0
class EditDistance:
    def __init__(self) -> None:
        self.word1 = ""
        self.word2 = ""
        self.dp = []  # memoization / DP table

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
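A quick sanity check of both solvers on the classic example, assuming the class and method names restored above:

# "kitten" -> "sitting" needs 3 edits (substitute k->s, e->i, insert g).
solver = EditDistance()
assert solver.min_dist_top_down("kitten", "sitting") == 3
assert solver.min_dist_bottom_up("kitten", "sitting") == 3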
713
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def A__ ( __A ): '''simple docstring''' _lowerCamelCase : Tuple = {} _lowerCamelCase : List[Any] = tokenizer(example["""content"""] , truncation=__A )["""input_ids"""] _lowerCamelCase : Tuple = len(example["""content"""] ) / len(output["""input_ids"""] ) return output lowerCAmelCase : int =HfArgumentParser(PretokenizationArguments) lowerCAmelCase : int =parser.parse_args() if args.num_workers is None: lowerCAmelCase : Any =multiprocessing.cpu_count() lowerCAmelCase : Optional[Any] =AutoTokenizer.from_pretrained(args.tokenizer_dir) lowerCAmelCase : str =time.time() lowerCAmelCase : Union[str, Any] =load_dataset(args.dataset_name, split="train") print(F"""Dataset loaded in {time.time()-t_start:.2f}s""") lowerCAmelCase : Dict =time.time() lowerCAmelCase : Dict =ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ "repo_name", "path", "copies", "size", "content", "license", "hash", "line_mean", "line_max", "alpha_frac", "autogenerated", ], ) print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""") lowerCAmelCase : Tuple =time.time() ds.push_to_hub(args.tokenized_data_repo) print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
15
0
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase : Dict =logging.get_logger(__name__) lowerCAmelCase : str ={ "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json", } class __snake_case ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' _snake_case = 'focalnet' def __init__( self : Optional[int] , _UpperCamelCase : str=224 , _UpperCamelCase : List[Any]=4 , _UpperCamelCase : List[Any]=3 , _UpperCamelCase : Optional[Any]=96 , _UpperCamelCase : Optional[Any]=False , _UpperCamelCase : Union[str, Any]=[192, 384, 768, 768] , _UpperCamelCase : Dict=[2, 2, 6, 2] , _UpperCamelCase : Union[str, Any]=[2, 2, 2, 2] , _UpperCamelCase : str=[3, 3, 3, 3] , _UpperCamelCase : Optional[Any]="gelu" , _UpperCamelCase : Tuple=4.0 , _UpperCamelCase : List[str]=0.0 , _UpperCamelCase : str=0.1 , _UpperCamelCase : int=False , _UpperCamelCase : Optional[Any]=1E-4 , _UpperCamelCase : List[str]=False , _UpperCamelCase : str=False , _UpperCamelCase : List[str]=False , _UpperCamelCase : Optional[int]=0.0_2 , _UpperCamelCase : Union[str, Any]=1E-5 , _UpperCamelCase : Tuple=32 , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : str=None , **_UpperCamelCase : Optional[Any] , ) ->Optional[int]: """simple docstring""" super().__init__(**__A) _lowerCamelCase : Union[str, Any] = image_size _lowerCamelCase : Dict = patch_size _lowerCamelCase : str = num_channels _lowerCamelCase : int = embed_dim _lowerCamelCase : Optional[Any] = use_conv_embed _lowerCamelCase : List[str] = hidden_sizes _lowerCamelCase : str = depths _lowerCamelCase : Tuple = focal_levels _lowerCamelCase : Any = focal_windows _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : Tuple = mlp_ratio _lowerCamelCase : Optional[int] = hidden_dropout_prob _lowerCamelCase : Union[str, Any] = drop_path_rate _lowerCamelCase : List[Any] = use_layerscale _lowerCamelCase : str = layerscale_value _lowerCamelCase : Any = use_post_layernorm _lowerCamelCase : List[Any] = use_post_layernorm_in_modulation _lowerCamelCase : List[Any] = normalize_modulator _lowerCamelCase : str = initializer_range _lowerCamelCase : List[str] = layer_norm_eps _lowerCamelCase : int = encoder_stride _lowerCamelCase : List[str] = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(self.depths) + 1)] _lowerCamelCase , _lowerCamelCase : List[str] = get_aligned_output_features_output_indices( out_features=__A , out_indices=__A , stage_names=self.stage_names)
714
import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _snake_case = IFPipeline _snake_case = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'} _snake_case = TEXT_TO_IMAGE_BATCH_PARAMS _snake_case = PipelineTesterMixin.required_optional_params - {'latents'} def _SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]: """simple docstring""" return self._get_dummy_components() def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=0) ->Optional[Any]: """simple docstring""" if str(_UpperCamelCase).startswith("""mps"""): _lowerCamelCase : int = torch.manual_seed(_UpperCamelCase) else: _lowerCamelCase : List[Any] = torch.Generator(device=_UpperCamelCase).manual_seed(_UpperCamelCase) _lowerCamelCase : Dict = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""") def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1) def _SCREAMING_SNAKE_CASE ( self : int) ->Any: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2) def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]: """simple docstring""" self._test_save_load_local() def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3) @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]: """simple docstring""" _lowerCamelCase : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa) _lowerCamelCase : Tuple = IFSuperResolutionPipeline.from_pretrained( """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("""cuda""") _lowerCamelCase , _lowerCamelCase : str = 
pipe_a.encode_prompt("""anime turtle""" , device="""cuda""") del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() _lowerCamelCase : str = None _lowerCamelCase : str = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img _lowerCamelCase : Optional[Any] = IFImgaImgPipeline(**pipe_a.components) _lowerCamelCase : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if_imgaimg(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting _lowerCamelCase : Any = IFInpaintingPipeline(**pipe_a.components) _lowerCamelCase : Dict = IFInpaintingSuperResolutionPipeline(**pipe_a.components) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if_inpainting(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str) ->Tuple: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : Optional[int] = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Optional[Any] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Optional[int] = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 _lowerCamelCase : Dict = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : str = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : Any = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : str , _UpperCamelCase : List[Any]) ->Any: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Union[str, Any] = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Dict 
= pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Union[str, Any] = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _lowerCamelCase : List[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : List[str] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Optional[Any] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : List[Any] = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : str = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple) ->Optional[int]: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(1)).to(_UpperCamelCase) _lowerCamelCase : int = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Any = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Any = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _lowerCamelCase : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1)).to(_UpperCamelCase) _lowerCamelCase : List[str] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : Optional[Any] = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : Optional[Any] = 
torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def A__ ( ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
15
0
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
715
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase : Any =logging.get_logger(__name__) lowerCAmelCase : List[Any] ={ "microsoft/swin-tiny-patch4-window7-224": ( "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json" ), # See all Swin models at https://huggingface.co/models?filter=swin } class __snake_case ( __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' _snake_case = 'swin' _snake_case = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : Optional[int] , _UpperCamelCase : List[str]=224 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : List[Any]=3 , _UpperCamelCase : Dict=96 , _UpperCamelCase : Any=[2, 2, 6, 2] , _UpperCamelCase : Any=[3, 6, 12, 24] , _UpperCamelCase : Tuple=7 , _UpperCamelCase : Tuple=4.0 , _UpperCamelCase : Dict=True , _UpperCamelCase : Tuple=0.0 , _UpperCamelCase : Any=0.0 , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : Any="gelu" , _UpperCamelCase : str=False , _UpperCamelCase : str=0.0_2 , _UpperCamelCase : Dict=1E-5 , _UpperCamelCase : List[str]=32 , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : List[Any]=None , **_UpperCamelCase : List[Any] , ) ->Tuple: """simple docstring""" super().__init__(**_UpperCamelCase) _lowerCamelCase : List[str] = image_size _lowerCamelCase : Tuple = patch_size _lowerCamelCase : Dict = num_channels _lowerCamelCase : Union[str, Any] = embed_dim _lowerCamelCase : str = depths _lowerCamelCase : str = len(_UpperCamelCase) _lowerCamelCase : Optional[Any] = num_heads _lowerCamelCase : Tuple = window_size _lowerCamelCase : int = mlp_ratio _lowerCamelCase : Optional[int] = qkv_bias _lowerCamelCase : List[str] = hidden_dropout_prob _lowerCamelCase : str = attention_probs_dropout_prob _lowerCamelCase : Tuple = drop_path_rate _lowerCamelCase : List[str] = hidden_act _lowerCamelCase : Dict = use_absolute_embeddings _lowerCamelCase : int = layer_norm_eps _lowerCamelCase : str = initializer_range _lowerCamelCase : Dict = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowerCamelCase : int = int(embed_dim * 2 ** (len(_UpperCamelCase) - 1)) _lowerCamelCase : Dict = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(_UpperCamelCase) + 1)] _lowerCamelCase , _lowerCamelCase : List[str] = get_aligned_output_features_output_indices( out_features=_UpperCamelCase , out_indices=_UpperCamelCase , stage_names=self.stage_names) class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = version.parse('1.11' ) @property def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ]) @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->float: """simple docstring""" return 1E-4
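The hidden_size bookkeeping in the constructor above reduces to simple arithmetic; a minimal check with the default embed_dim and depths:

# Channel dim after the last stage: each of the len(depths) - 1 = 3
# downsampling (patch-merging) steps doubles the embedding dim.
embed_dim, depths = 96, [2, 2, 6, 2]
assert int(embed_dim * 2 ** (len(depths) - 1)) == 768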
15
0
import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __snake_case ( unittest.TestCase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple: """simple docstring""" torch.manual_seed(0) _lowerCamelCase : Any = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Any: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.dummy_uncond_unet _lowerCamelCase : int = ScoreSdeVeScheduler() _lowerCamelCase : int = ScoreSdeVePipeline(unet=__a , scheduler=__a) sde_ve.to(__a) sde_ve.set_progress_bar_config(disable=__a) _lowerCamelCase : Tuple = torch.manual_seed(0) _lowerCamelCase : Any = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__a).images _lowerCamelCase : Any = torch.manual_seed(0) _lowerCamelCase : Dict = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__a , return_dict=__a)[ 0 ] _lowerCamelCase : str = image[0, -3:, -3:, -1] _lowerCamelCase : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _lowerCamelCase : str = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2 @slow @require_torch class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int: """simple docstring""" _lowerCamelCase : Tuple = 'google/ncsnpp-church-256' _lowerCamelCase : Dict = UNetaDModel.from_pretrained(__a) _lowerCamelCase : List[Any] = ScoreSdeVeScheduler.from_pretrained(__a) _lowerCamelCase : Tuple = ScoreSdeVePipeline(unet=__a , scheduler=__a) sde_ve.to(__a) sde_ve.set_progress_bar_config(disable=__a) _lowerCamelCase : Optional[int] = torch.manual_seed(0) _lowerCamelCase : Optional[Any] = sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=__a).images _lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) _lowerCamelCase : Optional[Any] = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
716
import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = (EulerDiscreteScheduler,) _snake_case = 10 def _SCREAMING_SNAKE_CASE ( self : Tuple , **_UpperCamelCase : Optional[Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Optional[int] = { """num_train_timesteps""": 1100, """beta_start""": 0.0_0_0_1, """beta_end""": 0.0_2, """beta_schedule""": """linear""", } config.update(**_UpperCamelCase) return config def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict: """simple docstring""" for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]): self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : List[Any] = self.scheduler_classes[0] _lowerCamelCase : str = self.get_scheduler_config() _lowerCamelCase : Any = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps) _lowerCamelCase : str = torch.manual_seed(0) _lowerCamelCase : str = self.dummy_model() _lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma _lowerCamelCase : int = sample.to(_UpperCamelCase) for i, t in enumerate(scheduler.timesteps): _lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : str = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : Dict = output.prev_sample _lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : Any = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2 assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any: """simple docstring""" _lowerCamelCase : int = self.scheduler_classes[0] _lowerCamelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""") _lowerCamelCase : int = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps) _lowerCamelCase : Any = torch.manual_seed(0) _lowerCamelCase : int = self.dummy_model() _lowerCamelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma _lowerCamelCase : Dict = sample.to(_UpperCamelCase) for i, t in enumerate(scheduler.timesteps): _lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : str = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[Any] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : Tuple = output.prev_sample _lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase 
: Optional[int] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 0.0_0_0_2) < 1E-2 assert abs(result_mean.item() - 2.2_6_7_6E-0_6) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.scheduler_classes[0] _lowerCamelCase : int = self.get_scheduler_config() _lowerCamelCase : List[Any] = scheduler_class(**_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase) _lowerCamelCase : Optional[Any] = torch.manual_seed(0) _lowerCamelCase : Tuple = self.dummy_model() _lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _lowerCamelCase : Tuple = sample.to(_UpperCamelCase) for t in scheduler.timesteps: _lowerCamelCase : List[Any] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Any = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : List[Any] = output.prev_sample _lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : List[Any] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2 assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3 def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple: """simple docstring""" _lowerCamelCase : List[str] = self.scheduler_classes[0] _lowerCamelCase : Optional[int] = self.get_scheduler_config() _lowerCamelCase : int = scheduler_class(**_UpperCamelCase , use_karras_sigmas=_UpperCamelCase) scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase) _lowerCamelCase : int = torch.manual_seed(0) _lowerCamelCase : Tuple = self.dummy_model() _lowerCamelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _lowerCamelCase : Optional[int] = sample.to(_UpperCamelCase) for t in scheduler.timesteps: _lowerCamelCase : Tuple = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : Any = model(_UpperCamelCase , _UpperCamelCase) _lowerCamelCase : List[str] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase) _lowerCamelCase : int = output.prev_sample _lowerCamelCase : Tuple = torch.sum(torch.abs(_UpperCamelCase)) _lowerCamelCase : List[str] = torch.mean(torch.abs(_UpperCamelCase)) assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9) < 1E-2 assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3) < 1E-3
15
0
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class __snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : Optional[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[int]=7 , _UpperCamelCase : List[Any]=3 , _UpperCamelCase : Tuple=18 , _UpperCamelCase : Dict=30 , _UpperCamelCase : List[Any]=400 , _UpperCamelCase : int=True , _UpperCamelCase : str=None , _UpperCamelCase : List[str]=True , _UpperCamelCase : Dict=None , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Optional[int]=[0.5, 0.5, 0.5] , _UpperCamelCase : str=[0.5, 0.5, 0.5] , ) ->Dict: """simple docstring""" _lowerCamelCase : List[str] = size if size is not None else {"""shortest_edge""": 18} _lowerCamelCase : Any = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} _lowerCamelCase : Optional[int] = parent _lowerCamelCase : str = batch_size _lowerCamelCase : int = num_channels _lowerCamelCase : List[Any] = image_size _lowerCamelCase : Tuple = min_resolution _lowerCamelCase : Any = max_resolution _lowerCamelCase : str = do_resize _lowerCamelCase : Any = size _lowerCamelCase : List[str] = do_center_crop _lowerCamelCase : List[str] = crop_size _lowerCamelCase : List[Any] = do_normalize _lowerCamelCase : str = image_mean _lowerCamelCase : Optional[Any] = image_std def _SCREAMING_SNAKE_CASE ( self : int) ->List[str]: """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class __snake_case ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _snake_case = LevitImageProcessor if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = LevitImageProcessingTester(self) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__A , """image_mean""")) self.assertTrue(hasattr(__A , """image_std""")) self.assertTrue(hasattr(__A , """do_normalize""")) self.assertTrue(hasattr(__A , """do_resize""")) self.assertTrue(hasattr(__A , """do_center_crop""")) self.assertTrue(hasattr(__A , """size""")) def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple: """simple docstring""" _lowerCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {"""shortest_edge""": 18}) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18}) _lowerCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84) self.assertEqual(image_processor.size , {"""shortest_edge""": 42}) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84}) def _SCREAMING_SNAKE_CASE ( self 
: List[Any]) ->Any: """simple docstring""" pass def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images _lowerCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A) for image in image_inputs: self.assertIsInstance(__A , Image.Image) # Test not batched input _lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _lowerCamelCase : List[Any] = image_processing(__A , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors _lowerCamelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A) for image in image_inputs: self.assertIsInstance(__A , np.ndarray) # Test not batched input _lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _lowerCamelCase : Optional[Any] = image_processing(__A , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def _SCREAMING_SNAKE_CASE ( self : List[str]) ->str: """simple docstring""" _lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors _lowerCamelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A) for image in image_inputs: self.assertIsInstance(__A , torch.Tensor) # Test not batched input _lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _lowerCamelCase : List[Any] = image_processing(__A , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
717
import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase : Dict =logging.get_logger(__name__) lowerCAmelCase : Dict ={"vocab_file": "vocab.json"} lowerCAmelCase : List[str] ={ "vocab_file": { "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json", } } lowerCAmelCase : int ={"mgp-str": 27} class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = VOCAB_FILES_NAMES _snake_case = PRETRAINED_VOCAB_FILES_MAP _snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : int="[GO]" , _UpperCamelCase : Any="[GO]" , _UpperCamelCase : Optional[Any]="[s]" , _UpperCamelCase : List[str]="[GO]" , **_UpperCamelCase : Dict) ->Union[str, Any]: """simple docstring""" super().__init__( unk_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , pad_token=_UpperCamelCase , **_UpperCamelCase , ) with open(_UpperCamelCase , encoding="""utf-8""") as vocab_handle: _lowerCamelCase : Optional[Any] = json.load(_UpperCamelCase) _lowerCamelCase : Optional[Any] = {v: k for k, v in self.vocab.items()} @property def _SCREAMING_SNAKE_CASE ( self : str) ->Any: """simple docstring""" return len(self.vocab) def _SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]: """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Union[str, Any]) ->Any: """simple docstring""" _lowerCamelCase : Tuple = [] for s in text: char_tokens.extend(_UpperCamelCase) return char_tokens def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : int) ->Optional[int]: """simple docstring""" return self.vocab.get(_UpperCamelCase , self.vocab.get(self.unk_token)) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[Any]) ->Dict: """simple docstring""" return self.decoder.get(_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None) ->Tuple[str]: """simple docstring""" if not os.path.isdir(_UpperCamelCase): logger.error("""Vocabulary path ({}) should be a directory""".format(_UpperCamelCase)) return _lowerCamelCase : Tuple = os.path.join( _UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]) with open(_UpperCamelCase , """w""" , encoding="""utf-8""") as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=_UpperCamelCase , ensure_ascii=_UpperCamelCase) + """\n""") return (vocab_file,)
15
0
import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase : List[Any] =get_tests_dir("fixtures/test_sentencepiece.model") lowerCAmelCase : Any ={"target_lang": "fi", "source_lang": "en"} lowerCAmelCase : Optional[Any] =">>zh<<" lowerCAmelCase : Union[str, Any] ="Helsinki-NLP/" if is_torch_available(): lowerCAmelCase : Optional[int] ="pt" elif is_tf_available(): lowerCAmelCase : Optional[int] ="tf" else: lowerCAmelCase : Union[str, Any] ="jax" @require_sentencepiece class __snake_case ( __snake_case , unittest.TestCase ): '''simple docstring''' _snake_case = MarianTokenizer _snake_case = False _snake_case = True def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[int]: """simple docstring""" super().setUp() _lowerCamelCase : Any = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""] _lowerCamelCase : Union[str, Any] = dict(zip(_lowercase , range(len(_lowercase)))) _lowerCamelCase : Any = Path(self.tmpdirname) save_json(_lowercase , save_dir / VOCAB_FILES_NAMES["""vocab"""]) save_json(_lowercase , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""]) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(_lowercase , save_dir / VOCAB_FILES_NAMES["""source_spm"""]) copyfile(_lowercase , save_dir / VOCAB_FILES_NAMES["""target_spm"""]) _lowerCamelCase : Any = MarianTokenizer.from_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **_UpperCamelCase : List[str]) ->Union[str, Any]: """simple docstring""" return MarianTokenizer.from_pretrained(self.tmpdirname , **_lowercase) def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : List[Any]) ->str: """simple docstring""" return ( "This is a test", "This is a test", ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]: """simple docstring""" _lowerCamelCase : Dict = """</s>""" _lowerCamelCase : List[str] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase) , _lowercase) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase) , _lowercase) def _SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]: """simple docstring""" _lowerCamelCase : int = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , """</s>""") self.assertEqual(vocab_keys[1] , """<unk>""") self.assertEqual(vocab_keys[-1] , """<pad>""") self.assertEqual(len(_lowercase) , 9) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 9) def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple: """simple docstring""" _lowerCamelCase : Union[str, Any] = MarianTokenizer.from_pretrained(F"""{ORG_NAME}opus-mt-en-de""") _lowerCamelCase : Optional[Any] = en_de_tokenizer(["""I am a small frog"""] , return_tensors=_lowercase) self.assertIsInstance(_lowercase , _lowercase) _lowerCamelCase : Optional[int] = [38, 121, 14, 697, 3_8848, 0] self.assertListEqual(_lowercase , batch.input_ids[0]) _lowerCamelCase : str = tempfile.mkdtemp() 
en_de_tokenizer.save_pretrained(_lowercase) _lowerCamelCase : Any = [x.name for x in Path(_lowercase).glob("""*""")] self.assertIn("""source.spm""" , _lowercase) MarianTokenizer.from_pretrained(_lowercase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : List[Any] = self.get_tokenizer() _lowerCamelCase : Optional[int] = tok( ["""I am a small frog""" * 1000, """I am a small frog"""] , padding=_lowercase , truncation=_lowercase , return_tensors=_lowercase) self.assertIsInstance(_lowercase , _lowercase) self.assertEqual(batch.input_ids.shape , (2, 512)) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str: """simple docstring""" _lowerCamelCase : List[str] = self.get_tokenizer() _lowerCamelCase : Dict = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=_lowercase , return_tensors=_lowercase) self.assertIsInstance(_lowercase , _lowercase) self.assertEqual(batch_smaller.input_ids.shape , (2, 10)) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Tuple: """simple docstring""" _lowerCamelCase : int = {"""input_ids""": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowercase , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , ) def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict: """simple docstring""" _lowerCamelCase : Dict = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""") _lowerCamelCase : Optional[int] = """Tämä on testi""" _lowerCamelCase : Tuple = """This is a test""" _lowerCamelCase : List[Any] = [76, 7, 2047, 2] _lowerCamelCase : List[Any] = [69, 12, 11, 940, 2] _lowerCamelCase : Union[str, Any] = tokenizer(_lowercase).input_ids self.assertListEqual(_lowercase , _lowercase) _lowerCamelCase : Optional[int] = tokenizer(text_target=_lowercase).input_ids self.assertListEqual(_lowercase , _lowercase) _lowerCamelCase : Tuple = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase) self.assertEqual(_lowercase , _lowercase)
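# End-to-end translation sketch with the checkpoint family exercised above
# (Helsinki-NLP/opus-mt-en-de appears in the integration test):
#
#   from transformers import MarianMTModel, MarianTokenizer
#   tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   model = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   batch = tokenizer(["I am a small frog"], return_tensors="pt")
#   print(tokenizer.batch_decode(model.generate(**batch), skip_special_tokens=True))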
718
import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
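# The helper aligns features and indices in either direction; a quick sketch of
# the mapping the tests above exercise:
#
#   stage_names = ["stem", "stage1", "stage2"]
#   get_aligned_output_features_output_indices(None, None, stage_names)
#   # -> (["stage2"], [2]): with neither side given, the last stage is selected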
15
0
import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class __snake_case : '''simple docstring''' def __init__( self : int , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any]=13 , _UpperCamelCase : List[str]=64 , _UpperCamelCase : Any=2 , _UpperCamelCase : Tuple=3 , _UpperCamelCase : List[Any]=True , _UpperCamelCase : str=True , _UpperCamelCase : List[str]=32 , _UpperCamelCase : str=5 , _UpperCamelCase : Tuple=4 , _UpperCamelCase : Tuple=37 , _UpperCamelCase : List[str]="gelu" , _UpperCamelCase : List[str]=0.1 , _UpperCamelCase : Optional[Any]=0.1 , _UpperCamelCase : str=10 , _UpperCamelCase : str=0.0_2 , _UpperCamelCase : Union[str, Any]=[1, 16, 4, 4] , _UpperCamelCase : Dict=None , ) ->int: """simple docstring""" _lowerCamelCase : int = parent _lowerCamelCase : str = batch_size _lowerCamelCase : Tuple = image_size _lowerCamelCase : List[Any] = patch_size _lowerCamelCase : Optional[Any] = num_channels _lowerCamelCase : List[str] = is_training _lowerCamelCase : List[Any] = use_labels _lowerCamelCase : Optional[Any] = hidden_size _lowerCamelCase : Any = num_hidden_layers _lowerCamelCase : List[Any] = num_attention_heads _lowerCamelCase : Any = intermediate_size _lowerCamelCase : Optional[int] = hidden_act _lowerCamelCase : List[str] = hidden_dropout_prob _lowerCamelCase : str = attention_probs_dropout_prob _lowerCamelCase : int = type_sequence_label_size _lowerCamelCase : Dict = initializer_range _lowerCamelCase : Tuple = scope _lowerCamelCase : Dict = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size _lowerCamelCase : Optional[int] = (self.image_size // 32) ** 2 _lowerCamelCase : List[str] = num_patches + 1 def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Any: """simple docstring""" _lowerCamelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _lowerCamelCase : str = None if self.use_labels: _lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size) _lowerCamelCase : Dict = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Tuple: """simple docstring""" _lowerCamelCase : Union[str, Any] = { """global_padding""": """same""", """layer_type""": """bottleneck""", """depths""": [3, 4, 9], """out_features""": ["""stage1""", """stage2""", """stage3"""], """embedding_dynamic_padding""": True, """hidden_sizes""": [4, 8, 16, 32], """num_groups""": 2, } return ViTHybridConfig( image_size=self.image_size , patch_size=self.patch_size , 
num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A__ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=A__ , ) def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : List[Any]) ->Any: """simple docstring""" _lowerCamelCase : Optional[int] = ViTHybridModel(config=A__) model.to(A__) model.eval() _lowerCamelCase : Tuple = model(A__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : Dict , _UpperCamelCase : Any , _UpperCamelCase : List[Any]) ->Dict: """simple docstring""" _lowerCamelCase : Dict = self.type_sequence_label_size _lowerCamelCase : Dict = ViTHybridForImageClassification(A__) model.to(A__) model.eval() _lowerCamelCase : Any = model(A__ , labels=A__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Tuple: """simple docstring""" _lowerCamelCase : List[str] = self.prepare_config_and_inputs() _lowerCamelCase : Union[str, Any] = config_and_inputs _lowerCamelCase : str = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _snake_case = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () _snake_case = ( {'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification} if is_torch_available() else {} ) _snake_case = False _snake_case = False _snake_case = False def _SCREAMING_SNAKE_CASE ( self : Dict) ->List[str]: """simple docstring""" _lowerCamelCase : Union[str, Any] = ViTHybridModelTester(self) _lowerCamelCase : List[Any] = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37) def _SCREAMING_SNAKE_CASE ( self : str) ->List[Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""ViT does not use inputs_embeds""") def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->str: """simple docstring""" pass def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : int = model_class(A__) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) _lowerCamelCase : int = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A__ , nn.Linear)) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Tuple: """simple docstring""" _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : List[str] = model_class(A__) _lowerCamelCase : str = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : Optional[int] = [*signature.parameters.keys()] _lowerCamelCase : Union[str, Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , A__) def _SCREAMING_SNAKE_CASE ( self : 
str) ->str: """simple docstring""" _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A__) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[int]: """simple docstring""" _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A__) def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict: """simple docstring""" _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase : Any = _config_zero_init(A__) for model_class in self.all_model_classes: _lowerCamelCase : List[str] = model_class(config=A__) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": _lowerCamelCase : Tuple = [F"""{name}.{key}""" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @slow def _SCREAMING_SNAKE_CASE ( self : Any) ->int: """simple docstring""" for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Union[str, Any] = ViTHybridModel.from_pretrained(A__) self.assertIsNotNone(A__) def A__ ( ): '''simple docstring''' _lowerCamelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class __snake_case ( unittest.TestCase ): '''simple docstring''' @cached_property def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict: """simple docstring""" return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None ) @slow def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]: """simple docstring""" _lowerCamelCase : str = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to( A__) _lowerCamelCase : Optional[Any] = self.default_image_processor _lowerCamelCase : Tuple = prepare_img() _lowerCamelCase : Optional[int] = image_processor(images=A__ , return_tensors="""pt""").to(A__) # forward pass with torch.no_grad(): _lowerCamelCase : List[Any] = model(**A__) # verify the logits _lowerCamelCase : Tuple = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape , A__) _lowerCamelCase : List[Any] = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9]).to(A__) self.assertTrue(torch.allclose(outputs.logits[0, :3] , A__ , atol=1E-4)) @slow @require_accelerate def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]: """simple docstring""" _lowerCamelCase : str = ViTHybridImageProcessor.from_pretrained("""google/vit-hybrid-base-bit-384""") _lowerCamelCase : Tuple = ViTHybridForImageClassification.from_pretrained("""google/vit-hybrid-base-bit-384""" , device_map="""auto""") _lowerCamelCase : Optional[Any] = prepare_img() _lowerCamelCase : Union[str, Any] = image_processor(images=A__ , return_tensors="""pt""") _lowerCamelCase : Union[str, Any] = model(**A__) _lowerCamelCase : Tuple = outputs.logits # model predicts one of the 1000 ImageNet classes _lowerCamelCase : Union[str, Any] = logits.argmax(-1).item() self.assertTrue(model.config.idalabel[predicted_class_idx] , """tabby, tabby cat""")
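# Stand-alone inference mirroring the integration test above (the checkpoint
# name is taken from the test; `image` is any PIL.Image, e.g. the COCO fixture):
#
#   from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor
#   processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
#   model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])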
719
import math


def is_prime(number: int) -> bool:
    """Return True if 'number' is prime, checking odd divisors up to sqrt(n)."""
    # precondition
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Scale 'value' by 'factor', then walk to the nearest prime (downward if desc=True)."""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
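# Small usage demonstration: next_prime walks upward from value * factor until
# it hits a prime (10 -> 11); passing desc=True walks downward instead.
if __name__ == "__main__":
    assert is_prime(13) and not is_prime(21)
    assert next_prime(10) == 11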
15
0
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
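# The identity behind the check: `loss` is the mean cross-entropy per label
# token, so negating (token_count * loss) recovers the total sequence
# log-likelihood that the reference Mesh-TensorFlow evaluation reports.
# Toy numbers below are hypothetical, chosen only to show the arithmetic:
#
#   mean_nll = 16.98254           # hypothetical per-token loss
#   n_label_tokens = 5            # hypothetical tokenized length of "Hi I am"
#   -(n_label_tokens * mean_nll)  # -84.9127, the scale of EXPECTED_SCORE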
720
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
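# Usage sketch: this class is what `pipeline("image-classification")` builds.
# The checkpoint and image path below are examples, not fixed defaults:
#
#   from transformers import pipeline
#
#   classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
#   classifier("cat.png", top_k=3)  # -> [{"score": ..., "label": ...}, ...]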
15
0
import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu


filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
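# `calculate_bleu` is imported from the local examples `utils` module; a minimal
# stand-in with the same output shape, assuming sacrebleu is installed:
#
#   import sacrebleu
#
#   def calculate_bleu(output_lns, refs_lns):
#       # corpus-level BLEU, returned as a dict like the helper used above
#       return {"bleu": round(sacrebleu.corpus_bleu(output_lns, [refs_lns]).score, 4)}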
721
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class __snake_case ( unittest.TestCase ): '''simple docstring''' _snake_case = ViTImageProcessor if is_vision_available() else None @property def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = (3, 32, 128) _lowerCamelCase : str = tempfile.mkdtemp() # fmt: off _lowerCamelCase : Dict = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on _lowerCamelCase : str = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase)))) _lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp: fp.write(json.dumps(_UpperCamelCase) + """\n""") _lowerCamelCase : Any = { """do_normalize""": False, """do_resize""": True, """image_processor_type""": """ViTImageProcessor""", """resample""": 3, """size""": {"""height""": 32, """width""": 128}, } _lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , _UpperCamelCase) with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp: json.dump(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **_UpperCamelCase : Any) ->Tuple: """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Dict , **_UpperCamelCase : Optional[Any]) ->List[Any]: """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: """simple docstring""" shutil.rmtree(self.tmpdirname) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: """simple docstring""" _lowerCamelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta) _lowerCamelCase : int = Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1)) return image_input def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" _lowerCamelCase : List[str] = self.get_tokenizer() _lowerCamelCase : Tuple = self.get_image_processor() _lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) processor.save_pretrained(self.tmpdirname) _lowerCamelCase : int = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCamelCase) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab()) self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string()) 
self.assertIsInstance(processor.image_processor , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict: """simple docstring""" _lowerCamelCase : Dict = self.get_tokenizer() _lowerCamelCase : Optional[Any] = self.get_image_processor() _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) processor.save_pretrained(self.tmpdirname) _lowerCamelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""") _lowerCamelCase : Union[str, Any] = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0) _lowerCamelCase : Tuple = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCamelCase , padding_value=1.0) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any) ->int: """simple docstring""" _lowerCamelCase : int = self.get_image_processor() _lowerCamelCase : int = self.get_tokenizer() _lowerCamelCase : List[str] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : List[str] = self.prepare_image_inputs() _lowerCamelCase : Optional[int] = image_processor(_UpperCamelCase , return_tensors="""np""") _lowerCamelCase : int = processor(images=_UpperCamelCase , return_tensors="""np""") for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]: """simple docstring""" _lowerCamelCase : List[Any] = self.get_image_processor() _lowerCamelCase : int = self.get_tokenizer() _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : Optional[int] = """test""" _lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase) _lowerCamelCase : Dict = tokenizer(_UpperCamelCase) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.get_image_processor() _lowerCamelCase : List[Any] = self.get_tokenizer() _lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : Any = """test""" _lowerCamelCase : List[str] = self.prepare_image_inputs() _lowerCamelCase : int = processor(text=_UpperCamelCase , images=_UpperCamelCase) self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""]) # test if it raises when no input is passed with pytest.raises(_UpperCamelCase): processor() def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.get_image_processor() _lowerCamelCase : List[str] = self.get_tokenizer() _lowerCamelCase : Dict = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] _lowerCamelCase : Any = processor.char_decode(_UpperCamelCase) _lowerCamelCase : Tuple = tokenizer.batch_decode(_UpperCamelCase) _lowerCamelCase : List[str] = [seq.replace(""" """ , """""") for seq in decoded_tok] 
self.assertListEqual(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str: """simple docstring""" _lowerCamelCase : Dict = self.get_image_processor() _lowerCamelCase : str = self.get_tokenizer() _lowerCamelCase : List[Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : int = None _lowerCamelCase : Union[str, Any] = self.prepare_image_inputs() _lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase , images=_UpperCamelCase) self.assertListEqual(list(inputs.keys()) , processor.model_input_names) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : List[str] = self.get_image_processor() _lowerCamelCase : int = self.get_tokenizer() _lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase) _lowerCamelCase : Any = torch.randn(1 , 27 , 38) _lowerCamelCase : List[Any] = torch.randn(1 , 27 , 5_0257) _lowerCamelCase : List[str] = torch.randn(1 , 27 , 3_0522) _lowerCamelCase : int = processor.batch_decode([char_input, bpe_input, wp_input]) self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
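# Typical OCR decoding flow that the tests above exercise: the processor couples
# a ViT image processor with the character tokenizer, and `batch_decode` fuses
# the character / BPE / wordpiece logits into one prediction (its return keys
# are asserted in the last test). Sketch, with `outputs` coming from an
# MGP-STR recognition model:
#
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values
#   generated = processor.batch_decode(outputs.logits)["generated_text"]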
15
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
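# The lazy structure above defers the heavy submodule imports until first
# attribute access; user code is unaffected either way:
#
#   from transformers import ViTMAEConfig, ViTMAEModel  # resolved lazily
#   model = ViTMAEModel(ViTMAEConfig())                 # randomly initialised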
700
"""A simple launcher script for TPU training."""

import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """Parse the command line options."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
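# Typical invocation (a sketch; everything after the positional script path is
# forwarded to that script, which must define a `_mp_fn(index)` entry point for
# `xmp.spawn` to call in each TPU process):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...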
15
0
import math
import unittest


def is_prime(number: int) -> bool:
    """Return True if 'number' is prime, using the 6k +/- 1 optimisation."""
    # precondition
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
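# Why the loop may step by 6: every prime p > 3 satisfies p % 6 in {1, 5},
# because the remaining residues are divisible by 2 or 3. Empirically:
#
#   all(p % 6 in (1, 5) for p in (5, 7, 11, 13, 17, 19, 23, 29))  # True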
701
def naive_cut_rod_recursive(n: int, prices: list):
    """Exponential-time recursion: try every first cut of length i."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))

    return max_revue


def top_down_cut_rod(n: int, prices: list):
    """Memoised top-down dynamic programming."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )

        max_rev[n] = max_revenue

        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative bottom-up dynamic programming."""
    _enforce_args(n, prices)

    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])

        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
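# A second sanity check on the classic CLRS price table: the optimal revenue for
# n = 8 is 22 (cut into lengths 2 + 6, revenue 5 + 17). All three strategies
# agree; the naive version just takes exponential time as n grows.
clrs_prices = [1, 5, 8, 9, 10, 17, 17, 20]
for solver in (naive_cut_rod_recursive, top_down_cut_rod, bottom_up_cut_rod):
    assert solver(len(clrs_prices), clrs_prices) == 22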
15
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]


if TYPE_CHECKING:
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
702
from __future__ import annotations


class XORCipher:
    def __init__(self, key: int = 0):
        """Store a default key; 0 means 'choose at call time'."""
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """Encrypt 'content' char by char; falls back to the constructor key, else 1."""
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        """Decrypt 'content' char by char; XOR is symmetric, so this mirrors encrypt."""
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False

        return True


# Tests
# crypt = XORCipher()
# key = 67

# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))

# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))

# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))

# if (crypt.encrypt_file("test.txt",key)):
#     print("encrypt successful")
# else:
#     print("encrypt unsuccessful")

# if (crypt.decrypt_file("encrypt.out",key)):
#     print("decrypt successful")
# else:
#     print("decrypt unsuccessful")
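# XOR with the same key is an involution, so a round trip restores the input;
# a tiny runnable check (key 67 mirrors the commented tests above):
if __name__ == "__main__":
    cipher = XORCipher()
    assert cipher.decrypt_string(cipher.encrypt_string("hallo welt", 67), 67) == "hallo welt"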
15
0
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.17.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") lowerCAmelCase : Optional[int] =logging.getLogger(__name__) @dataclass class __snake_case : '''simple docstring''' _snake_case = field( default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} ) _snake_case = field( default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , ) _snake_case = field( default=1_024 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) _snake_case = field( default=__A , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} ) _snake_case = field( default=__A , metadata={ 'help': ( 'Whether to pad all samples to `max_seq_length`. ' 'If False, will pad the samples dynamically when batching to the maximum length in the batch.' ) } , ) _snake_case = field( default=__A , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) _snake_case = field( default=__A , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) } , ) _snake_case = field( default=__A , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of prediction examples to this ' 'value if set.' ) } , ) _snake_case = field( default=__A , metadata={'help': 'A csv or a json file containing the training data.'} ) _snake_case = field( default=__A , metadata={'help': 'A csv or a json file containing the validation data.'} ) _snake_case = field(default=__A , metadata={'help': 'A csv or a json file containing the test data.'} ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict: """simple docstring""" if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""") else: _lowerCamelCase : Any = self.train_file.split(""".""")[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." _lowerCamelCase : List[Any] = self.validation_file.split(""".""")[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class __snake_case : '''simple docstring''' _snake_case = field( default=__A , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _snake_case = field( default=__A , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _snake_case = field( default=__A , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _snake_case = field( default=__A , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) _snake_case = field( default=__A , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , ) _snake_case = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) _snake_case = field( default=__A , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } , ) def A__ ( ): '''simple docstring''' _lowerCamelCase : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _lowerCamelCase : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _lowerCamelCase : Union[str, Any] = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) _lowerCamelCase : List[str] = training_args.get_process_log_level() logger.setLevel(__lowercase ) datasets.utils.logging.set_verbosity(__lowercase ) transformers.utils.logging.set_verbosity(__lowercase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. _lowerCamelCase : List[str] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _lowerCamelCase : List[str] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). 
# # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. _lowerCamelCase : Dict = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. _lowerCamelCase : Tuple = {'train': data_args.train_file, 'validation': data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: _lowerCamelCase : str = data_args.train_file.split(""".""" )[-1] _lowerCamelCase : Optional[int] = data_args.test_file.split(""".""" )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." _lowerCamelCase : List[str] = data_args.test_file else: raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" ) for key in data_files.keys(): logger.info(F"""load a local file for {key}: {data_files[key]}""" ) if data_args.train_file.endswith(""".csv""" ): # Loading a dataset from local csv files _lowerCamelCase : Any = load_dataset("""csv""" , data_files=__lowercase , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files _lowerCamelCase : List[str] = load_dataset("""json""" , data_files=__lowercase , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels _lowerCamelCase : List[str] = raw_datasets['train'].features['label'].names _lowerCamelCase : Union[str, Any] = len(__lowercase ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_lowerCamelCase : int = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer _lowerCamelCase : str = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__lowercase , ) _lowerCamelCase : Any = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: _lowerCamelCase : Optional[Any] = 'max_length' else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch _lowerCamelCase : List[Any] = False # Some models have set the order of the labels to use, so let's make sure we do use it. _lowerCamelCase : List[str] = {'Refused': 0, 'Entailed': 1} _lowerCamelCase : Union[str, Any] = {0: 'Refused', 1: 'Entailed'} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) _lowerCamelCase : List[Any] = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(__A ): # Tokenize the texts def _convert_table_text_to_pandas(__A ): _lowerCamelCase : int = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )] _lowerCamelCase : List[Any] = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd _lowerCamelCase : int = examples['statement'] _lowerCamelCase : str = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) ) _lowerCamelCase : List[str] = tokenizer(__lowercase , __lowercase , padding=__lowercase , max_length=__lowercase , truncation=__lowercase ) _lowerCamelCase : Dict = examples['label'] return result with training_args.main_process_first(desc="""dataset map pre-processing""" ): _lowerCamelCase : str = raw_datasets.map( __lowercase , batched=__lowercase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("""--do_train requires a train dataset""" ) _lowerCamelCase : int = raw_datasets['train'] if data_args.max_train_samples is not None: _lowerCamelCase : str = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("""--do_eval requires a validation dataset""" ) _lowerCamelCase : List[str] = raw_datasets['validation'] if data_args.max_eval_samples is not None: _lowerCamelCase : int = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError("""--do_predict 
requires a test dataset""" ) _lowerCamelCase : int = raw_datasets['test'] if data_args.max_predict_samples is not None: _lowerCamelCase : Optional[int] = predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(__lowercase ) ) , 3 ): logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(__A ): _lowerCamelCase : Optional[int] = p.predictions[0] if isinstance(p.predictions , __lowercase ) else p.predictions _lowerCamelCase : List[Any] = np.argmax(__lowercase , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: _lowerCamelCase : int = default_data_collator elif training_args.fpaa: _lowerCamelCase : Any = DataCollatorWithPadding(__lowercase , pad_to_multiple_of=8 ) else: _lowerCamelCase : Union[str, Any] = None # Initialize our Trainer _lowerCamelCase : str = Trainer( model=__lowercase , args=__lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__lowercase , tokenizer=__lowercase , data_collator=__lowercase , ) # Training if training_args.do_train: _lowerCamelCase : Tuple = None if training_args.resume_from_checkpoint is not None: _lowerCamelCase : List[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _lowerCamelCase : str = last_checkpoint _lowerCamelCase : Any = trainer.train(resume_from_checkpoint=__lowercase ) _lowerCamelCase : Optional[Any] = train_result.metrics _lowerCamelCase : str = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(__lowercase ) ) _lowerCamelCase : Any = min(__lowercase , len(__lowercase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("""train""" , __lowercase ) trainer.save_metrics("""train""" , __lowercase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) _lowerCamelCase : List[Any] = trainer.evaluate(eval_dataset=__lowercase ) _lowerCamelCase : List[Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__lowercase ) _lowerCamelCase : str = min(__lowercase , len(__lowercase ) ) trainer.log_metrics("""eval""" , __lowercase ) trainer.save_metrics("""eval""" , __lowercase ) if training_args.do_predict: logger.info("""*** Predict ***""" ) # Removing the `label` columns because it contains -1 and Trainer won't like that. 
_lowerCamelCase : Optional[int] = predict_dataset.remove_columns("""label""" ) _lowerCamelCase : Union[str, Any] = trainer.predict(__lowercase , metric_key_prefix="""predict""" ).predictions _lowerCamelCase : Union[str, Any] = np.argmax(__lowercase , axis=1 ) _lowerCamelCase : Union[str, Any] = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" ) if trainer.is_world_process_zero(): with open(__lowercase , """w""" ) as writer: logger.info("""***** Predict Results *****""" ) writer.write("""index\tprediction\n""" ) for index, item in enumerate(__lowercase ): _lowerCamelCase : List[str] = label_list[item] writer.write(F"""{index}\t{item}\n""" ) _lowerCamelCase : Dict = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'} if training_args.push_to_hub: trainer.push_to_hub(**__lowercase ) else: trainer.create_model_card(**__lowercase ) def A__ ( __A ): '''simple docstring''' main() if __name__ == "__main__": main()
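# Added illustrative invocation (a sketch, not from the original file): the flag
# names below correspond to the dataclass fields parsed above; the script file
# name and checkpoint are assumptions.
#
#   python run_tabfact.py \
#       --model_name_or_path microsoft/tapex-base \
#       --do_train --do_eval --do_predict \
#       --train_file train.csv --validation_file val.csv --test_file test.csv \
#       --output_dir ./tapex-tabfact-output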
703
from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class __snake_case ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Dict , _UpperCamelCase : NestedDataStructureLike[PathLike] , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Optional[Features] = None , _UpperCamelCase : str = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : Optional[int] = None , **_UpperCamelCase : Tuple , ) ->Union[str, Any]: """simple docstring""" super().__init__( _UpperCamelCase , split=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase , streaming=_UpperCamelCase , num_proc=_UpperCamelCase , **_UpperCamelCase , ) _lowerCamelCase : List[Any] = path_or_paths if isinstance(_UpperCamelCase , _UpperCamelCase) else {self.split: path_or_paths} _lowerCamelCase : Any = Text( cache_dir=_UpperCamelCase , data_files=_UpperCamelCase , features=_UpperCamelCase , **_UpperCamelCase , ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]: """simple docstring""" if self.streaming: _lowerCamelCase : Tuple = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: _lowerCamelCase : List[Any] = None _lowerCamelCase : Any = None _lowerCamelCase : List[str] = None _lowerCamelCase : Dict = None self.builder.download_and_prepare( download_config=_UpperCamelCase , download_mode=_UpperCamelCase , verification_mode=_UpperCamelCase , base_path=_UpperCamelCase , num_proc=self.num_proc , ) _lowerCamelCase : Optional[int] = self.builder.as_dataset( split=self.split , verification_mode=_UpperCamelCase , in_memory=self.keep_in_memory) return dataset
15
0
from ..utils import DummyObject, requires_backends


class __snake_case ( metaclass=DummyObject ):
    '''simple docstring'''
    _snake_case = ['keras_nlp']

    def __init__( self : Any , *_UpperCamelCase : Dict , **_UpperCamelCase : List[str]) ->List[Any]:
        '''simple docstring'''
        requires_backends(self , ["""keras_nlp"""])
704
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence):
    '''simple docstring'''
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = F"""The elements inside the sequence must contain only {colors} values"""
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(F"""{dutch_national_flag_sort(unsorted)}""")
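# Added worked example (illustrative, not part of the original sample): one pass
# moves every first-color value before the second-color values and every
# third-color value after them, using the low/mid/high pointers above.
assert dutch_national_flag_sort([2, 0, 1, 2, 0]) == [0, 0, 1, 2, 2]
assert dutch_national_flag_sort([]) == []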
15
0
from string import ascii_uppercase

ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num, base):
    '''simple docstring'''
    if isinstance(num, float):
        raise TypeError("""int() can't convert non-string with explicit base""")
    if num < 0:
        raise ValueError("""parameter must be positive int""")
    if isinstance(base, str):
        raise TypeError("""'str' object cannot be interpreted as an integer""")
    if isinstance(base, float):
        raise TypeError("""'float' object cannot be interpreted as an integer""")
    if base in (0, 1):
        raise ValueError("""base must be >= 2""")
    if base > 36:
        raise ValueError("""base must be <= 36""")
    new_value = """"""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(num)
            return str(new_value[::-1])
    return new_value[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    for base in range(2, 37):
        for num in range(1000):
            assert int(decimal_to_any(num, base), base) == num, (
                num,
                base,
                decimal_to_any(num, base),
                int(decimal_to_any(num, base), base),
            )
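# Added worked example (illustrative, not part of the original sample):
# divmod(255, 16) -> (15, 15), and ALPHABET_VALUES maps "15" to "F", so the
# collected digits reverse to "FF".
assert decimal_to_any(255, 16) == """FF"""
assert decimal_to_any(5, 2) == """101"""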
705
from __future__ import annotations

solution = []


def is_safe(board, row, column):
    '''simple docstring'''
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board, row):
    '''simple docstring'''
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board):
    '''simple docstring'''
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("""Q""" , end=""" """)
            else:
                print(""".""" , end=""" """)
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions is:", len(solution))
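# Added self-contained sketch (illustrative, not part of the original sample):
# the same backtracking idea, but tracking attacked columns/diagonals in sets
# instead of rescanning the board. It confirms the classic solution counts,
# e.g. 92 placements for n = 8.
def count_queens(size):
    cols, diag_down, diag_up = set(), set(), set()

    def place(row):
        if row == size:
            return 1
        total = 0
        for col in range(size):
            if col in cols or row - col in diag_down or row + col in diag_up:
                continue
            cols.add(col)
            diag_down.add(row - col)
            diag_up.add(row + col)
            total += place(row + 1)
            cols.remove(col)
            diag_down.remove(row - col)
            diag_up.remove(row + col)
        return total

    return place(0)


assert count_queens(4) == 2
assert count_queens(8) == 92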
15
0
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : int , _UpperCamelCase : Dict , _UpperCamelCase : str=7 , _UpperCamelCase : Any=3 , _UpperCamelCase : Tuple=18 , _UpperCamelCase : Union[str, Any]=30 , _UpperCamelCase : Optional[int]=400 , _UpperCamelCase : Optional[Any]=True , _UpperCamelCase : List[str]=None , _UpperCamelCase : Optional[Any]=True , ) ->Union[str, Any]: """simple docstring""" _lowerCamelCase : int = size if size is not None else {"height": 18, "width": 18} _lowerCamelCase : str = parent _lowerCamelCase : str = batch_size _lowerCamelCase : Tuple = num_channels _lowerCamelCase : Optional[int] = image_size _lowerCamelCase : List[str] = min_resolution _lowerCamelCase : Tuple = max_resolution _lowerCamelCase : Tuple = do_resize _lowerCamelCase : Dict = size _lowerCamelCase : List[str] = apply_ocr def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[int]: """simple docstring""" return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class __snake_case ( __lowerCamelCase , unittest.TestCase ): '''simple docstring''' _snake_case = LayoutLMvaImageProcessor if is_pytesseract_available() else None def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->int: """simple docstring""" _lowerCamelCase : List[str] = LayoutLMvaImageProcessingTester(self) @property def _SCREAMING_SNAKE_CASE ( self : str) ->List[Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : List[str]) ->str: """simple docstring""" _lowerCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(a_ , """do_resize""")) self.assertTrue(hasattr(a_ , """size""")) self.assertTrue(hasattr(a_ , """apply_ocr""")) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: """simple docstring""" _lowerCamelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18}) _lowerCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42}) def _SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]: """simple docstring""" pass def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Any: """simple docstring""" _lowerCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images _lowerCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_) for image in image_inputs: self.assertIsInstance(a_ , Image.Image) # Test not batched input _lowerCamelCase : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""") self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) self.assertIsInstance(encoding.words , a_) self.assertIsInstance(encoding.boxes , a_) # Test batched _lowerCamelCase : 
int = image_processing(a_ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def _SCREAMING_SNAKE_CASE ( self : int) ->str: """simple docstring""" _lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors _lowerCamelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_) for image in image_inputs: self.assertIsInstance(a_ , np.ndarray) # Test not batched input _lowerCamelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _lowerCamelCase : Any = image_processing(a_ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def _SCREAMING_SNAKE_CASE ( self : int) ->Any: """simple docstring""" _lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors _lowerCamelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_) for image in image_inputs: self.assertIsInstance(a_ , torch.Tensor) # Test not batched input _lowerCamelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _lowerCamelCase : Union[str, Any] = image_processing(a_ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def _SCREAMING_SNAKE_CASE ( self : Any) ->List[str]: """simple docstring""" _lowerCamelCase : Any = LayoutLMvaImageProcessor() from datasets import load_dataset _lowerCamelCase : Optional[Any] = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""") _lowerCamelCase : Optional[Any] = Image.open(ds[0]["""file"""]).convert("""RGB""") _lowerCamelCase : List[Any] = image_processing(a_ , return_tensors="""pt""") self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224)) self.assertEqual(len(encoding.words) , len(encoding.boxes)) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 _lowerCamelCase : List[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", 
"Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231 _lowerCamelCase : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 
528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , a_) self.assertListEqual(encoding.boxes , a_) # with apply_OCR = False _lowerCamelCase : List[str] = LayoutLMvaImageProcessor(apply_ocr=a_) _lowerCamelCase : List[str] = image_processing(a_ , return_tensors="""pt""") self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224))
706
import argparse import os import torch from transformers import ( XLNetConfig, XLNetForQuestionAnswering, XLNetForSequenceClassification, XLNetLMHeadModel, load_tf_weights_in_xlnet, ) from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging lowerCAmelCase : int ={ "cola": 2, "mnli": 3, "mrpc": 2, "sst-2": 2, "sts-b": 1, "qqp": 2, "qnli": 2, "rte": 2, "wnli": 2, } logging.set_verbosity_info() def A__ ( __A , __A , __A , __A=None ): '''simple docstring''' # Initialise PyTorch model _lowerCamelCase : Tuple = XLNetConfig.from_json_file(__A ) _lowerCamelCase : List[Any] = finetuning_task.lower() if finetuning_task is not None else """""" if finetuning_task in GLUE_TASKS_NUM_LABELS: print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" ) _lowerCamelCase : int = finetuning_task _lowerCamelCase : Union[str, Any] = GLUE_TASKS_NUM_LABELS[finetuning_task] _lowerCamelCase : int = XLNetForSequenceClassification(__A ) elif "squad" in finetuning_task: _lowerCamelCase : Dict = finetuning_task _lowerCamelCase : Optional[Any] = XLNetForQuestionAnswering(__A ) else: _lowerCamelCase : Any = XLNetLMHeadModel(__A ) # Load weights from tf checkpoint load_tf_weights_in_xlnet(__A , __A , __A ) # Save pytorch-model _lowerCamelCase : Optional[Any] = os.path.join(__A , __A ) _lowerCamelCase : Any = os.path.join(__A , __A ) print(F"""Save PyTorch model to {os.path.abspath(__A )}""" ) torch.save(model.state_dict() , __A ) print(F"""Save configuration file to {os.path.abspath(__A )}""" ) with open(__A , """w""" , encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": lowerCAmelCase : Dict =argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--xlnet_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained XLNet model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the folder to store the PyTorch model or dataset/vocab.", ) parser.add_argument( "--finetuning_task", default=None, type=str, help="Name of a task on which the XLNet TensorFlow model was fine-tuned", ) lowerCAmelCase : Union[str, Any] =parser.parse_args() print(args) convert_xlnet_checkpoint_to_pytorch( args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task )
15
0
from __future__ import annotations


def A__ ( __A ):
    return len(set(__A ) ) == len(__A )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
707
def A__ ( input_str ):
    '''simple docstring'''
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2 , ch_unicode )
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
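# Added self-contained demonstration (illustrative, not part of the original
# sample): the bitmask keeps one bit per code point, so a repeated character is
# caught by testing its bit before setting it.
def all_chars_unique_demo(text):
    seen = 0
    for character in text:
        bit = 1 << ord(character)
        if seen & bit:  # bit already on -> the character repeats
            return False
        seen |= bit
    return True


assert all_chars_unique_demo("""abcd""") is True
assert all_chars_unique_demo("""abca""") is False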
15
0
'''simple docstring''' from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 lowerCAmelCase : Dict ={ # 1536-bit 5: { 'prime': int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF", base=16, ), 'generator': 2, }, # 2048-bit 14: { 'prime': int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AACAA68FFFFFFFFFFFFFFFF", base=16, ), 'generator': 2, }, # 3072-bit 15: { 'prime': int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF", base=16, ), 'generator': 2, }, # 4096-bit 16: { 'prime': int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199" + "FFFFFFFFFFFFFFFF", base=16, ), 'generator': 2, }, # 6144-bit 17: { 'prime': int( 
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08" + "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B" + "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9" + "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6" + "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8" + "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C" + "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718" + "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D" + "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D" + "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226" + "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC" + "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26" + "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB" + "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2" + "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127" + "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406" + "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918" + "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151" + "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03" + "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F" + "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B" + "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632" + "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E" + "6DCC4024FFFFFFFFFFFFFFFF", base=16, ), 'generator': 2, }, # 8192-bit 18: { 'prime': int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD" + "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831" + "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B" + "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF" + "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6" + "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3" + "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328" + "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C" + "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE" + "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4" + "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300" + "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568" + 
"3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9" + "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B" + "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A" + "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36" + "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1" + "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92" + "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47" + "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71" + "60C980DD98EDD3DFFFFFFFFFFFFFFFFF", base=16, ), 'generator': 2, }, } class __snake_case : '''simple docstring''' def __init__( self : Union[str, Any] , _UpperCamelCase : str = 14) ->None: """simple docstring""" if group not in primes: raise ValueError("""Unsupported Group""") _lowerCamelCase : Union[str, Any] = primes[group]["""prime"""] _lowerCamelCase : Optional[Any] = primes[group]["""generator"""] _lowerCamelCase : Optional[Any] = int(hexlify(urandom(32)) , base=16) def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" return hex(self.__private_key)[2:] def _SCREAMING_SNAKE_CASE ( self : Tuple) ->str: """simple docstring""" _lowerCamelCase : Any = pow(self.generator , self.__private_key , self.prime) return hex(_SCREAMING_SNAKE_CASE)[2:] def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : List[Any]) ->bool: """simple docstring""" return ( 2 <= key <= self.prime - 2 and pow(_SCREAMING_SNAKE_CASE , (self.prime - 1) // 2 , self.prime) == 1 ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : List[str]) ->str: """simple docstring""" _lowerCamelCase : Optional[Any] = int(_SCREAMING_SNAKE_CASE , base=16) if not self.is_valid_public_key(_SCREAMING_SNAKE_CASE): raise ValueError("""Invalid public key""") _lowerCamelCase : str = pow(_SCREAMING_SNAKE_CASE , self.__private_key , self.prime) return shaaaa(str(_SCREAMING_SNAKE_CASE).encode()).hexdigest() @staticmethod def _SCREAMING_SNAKE_CASE ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[Any]) ->bool: """simple docstring""" return ( 2 <= remote_public_key_str <= prime - 2 and pow(_SCREAMING_SNAKE_CASE , (prime - 1) // 2 , _SCREAMING_SNAKE_CASE) == 1 ) @staticmethod def _SCREAMING_SNAKE_CASE ( _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Any = 14) ->str: """simple docstring""" _lowerCamelCase : Dict = int(_SCREAMING_SNAKE_CASE , base=16) _lowerCamelCase : str = int(_SCREAMING_SNAKE_CASE , base=16) _lowerCamelCase : List[Any] = primes[group]["""prime"""] if not DiffieHellman.is_valid_public_key_static(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE): raise ValueError("""Invalid public key""") _lowerCamelCase : int = pow(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) return shaaaa(str(_SCREAMING_SNAKE_CASE).encode()).hexdigest() if __name__ == "__main__": import doctest doctest.testmod()
708
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class __snake_case ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Tuple) ->int: """simple docstring""" _lowerCamelCase : Tuple = XLMRobertaModel.from_pretrained("""xlm-roberta-base""") _lowerCamelCase : Optional[int] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]]) # The dog is cute and lives in the garden house _lowerCamelCase : Optional[Any] = torch.Size((1, 12, 768)) # batch_size, sequence_length, embedding_vector_dim _lowerCamelCase : str = torch.tensor( [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]]) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): _lowerCamelCase : List[str] = model(_UpperCamelCase)["""last_hidden_state"""].detach() self.assertEqual(output.shape , _UpperCamelCase) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1E-3)) @slow def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : List[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""") _lowerCamelCase : Optional[Any] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]]) # The dog is cute and lives in the garden house _lowerCamelCase : str = torch.Size((1, 12, 1024)) # batch_size, sequence_length, embedding_vector_dim _lowerCamelCase : Union[str, Any] = torch.tensor( [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]]) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): _lowerCamelCase : int = model(_UpperCamelCase)["""last_hidden_state"""].detach() self.assertEqual(output.shape , _UpperCamelCase) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1E-3))
15
0
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) lowerCAmelCase : int =logging.get_logger(__name__) # pylint: disable=invalid-name lowerCAmelCase : Union[str, Any] ='\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n' def A__ ( __A , __A , __A=8 ): '''simple docstring''' _lowerCamelCase : List[Any] = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 _lowerCamelCase : Dict = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class __snake_case ( _A ): '''simple docstring''' def __init__( self : Tuple , _UpperCamelCase : UNetaDConditionModel , _UpperCamelCase : DDPMScheduler , _UpperCamelCase : VQModel , ) ->Union[str, Any]: """simple docstring""" super().__init__() self.register_modules( unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , movq=UpperCamelCase__ , ) _lowerCamelCase : Dict = 2 ** (len(self.movq.config.block_out_channels) - 1) def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[Any]) ->Tuple: """simple docstring""" if latents is None: _lowerCamelCase : Union[str, Any] = randn_tensor(UpperCamelCase__ , generator=UpperCamelCase__ , device=UpperCamelCase__ , dtype=UpperCamelCase__) else: if latents.shape != shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""") _lowerCamelCase : Union[str, Any] = latents.to(UpperCamelCase__) _lowerCamelCase : str = latents * scheduler.init_noise_sigma return latents def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : List[Any]=0) ->Union[str, Any]: """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""") _lowerCamelCase : List[Any] = torch.device(F"""cuda:{gpu_id}""") _lowerCamelCase : List[Any] = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(UpperCamelCase__ , UpperCamelCase__) def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : Dict=0) ->int: """simple docstring""" if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0"""): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""") _lowerCamelCase : List[str] = 
torch.device(F"""cuda:{gpu_id}""") if self.device.type != "cpu": self.to("""cpu""" , silence_dtype_warnings=UpperCamelCase__) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) _lowerCamelCase : str = None for cpu_offloaded_model in [self.unet, self.movq]: _lowerCamelCase , _lowerCamelCase : int = cpu_offload_with_hook(UpperCamelCase__ , UpperCamelCase__ , prev_module_hook=UpperCamelCase__) # We'll offload the last model manually. _lowerCamelCase : Tuple = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple: """simple docstring""" if not hasattr(self.unet , """_hf_hook"""): return self.device for module in self.unet.modules(): if ( hasattr(UpperCamelCase__ , """_hf_hook""") and hasattr(module._hf_hook , """execution_device""") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device @torch.no_grad() @replace_example_docstring(UpperCamelCase__) def __call__( self : Dict , _UpperCamelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , _UpperCamelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , _UpperCamelCase : int = 512 , _UpperCamelCase : int = 512 , _UpperCamelCase : int = 100 , _UpperCamelCase : float = 4.0 , _UpperCamelCase : int = 1 , _UpperCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _UpperCamelCase : Optional[torch.FloatTensor] = None , _UpperCamelCase : Optional[str] = "pil" , _UpperCamelCase : bool = True , ) ->int: """simple docstring""" _lowerCamelCase : int = self._execution_device _lowerCamelCase : Union[str, Any] = guidance_scale > 1.0 if isinstance(UpperCamelCase__ , UpperCamelCase__): _lowerCamelCase : List[Any] = torch.cat(UpperCamelCase__ , dim=0) _lowerCamelCase : Dict = image_embeds.shape[0] * num_images_per_prompt if isinstance(UpperCamelCase__ , UpperCamelCase__): _lowerCamelCase : Tuple = torch.cat(UpperCamelCase__ , dim=0) if do_classifier_free_guidance: _lowerCamelCase : str = image_embeds.repeat_interleave(UpperCamelCase__ , dim=0) _lowerCamelCase : int = negative_image_embeds.repeat_interleave(UpperCamelCase__ , dim=0) _lowerCamelCase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=UpperCamelCase__) self.scheduler.set_timesteps(UpperCamelCase__ , device=UpperCamelCase__) _lowerCamelCase : List[Any] = self.scheduler.timesteps _lowerCamelCase : List[str] = self.unet.config.in_channels _lowerCamelCase , _lowerCamelCase : List[Any] = downscale_height_and_width(UpperCamelCase__ , UpperCamelCase__ , self.movq_scale_factor) # create initial latent _lowerCamelCase : int = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.scheduler , ) for i, t in enumerate(self.progress_bar(UpperCamelCase__)): # expand the latents if we are doing classifier free guidance _lowerCamelCase : Optional[Any] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents _lowerCamelCase : Optional[Any] = {"""image_embeds""": image_embeds} _lowerCamelCase : List[Any] = self.unet( sample=UpperCamelCase__ , timestep=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , added_cond_kwargs=UpperCamelCase__ , return_dict=UpperCamelCase__ , )[0] if do_classifier_free_guidance: _lowerCamelCase , _lowerCamelCase : Any = 
noise_pred.split(latents.shape[1] , dim=1) _lowerCamelCase , _lowerCamelCase : Union[str, Any] = noise_pred.chunk(2) _lowerCamelCase , _lowerCamelCase : Optional[Any] = variance_pred.chunk(2) _lowerCamelCase : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) _lowerCamelCase : Dict = torch.cat([noise_pred, variance_pred_text] , dim=1) if not ( hasattr(self.scheduler.config , """variance_type""") and self.scheduler.config.variance_type in ["learned", "learned_range"] ): _lowerCamelCase , _lowerCamelCase : Any = noise_pred.split(latents.shape[1] , dim=1) # compute the previous noisy sample x_t -> x_t-1 _lowerCamelCase : Tuple = self.scheduler.step( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ , )[0] # post-processing _lowerCamelCase : Tuple = self.movq.decode(UpperCamelCase__ , force_not_quantize=UpperCamelCase__)["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""") if output_type in ["np", "pil"]: _lowerCamelCase : Tuple = image * 0.5 + 0.5 _lowerCamelCase : Union[str, Any] = image.clamp(0 , 1) _lowerCamelCase : str = image.cpu().permute(0 , 2 , 3 , 1).float().numpy() if output_type == "pil": _lowerCamelCase : Optional[int] = self.numpy_to_pil(UpperCamelCase__) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase__)
709
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase : Tuple =logging.get_logger(__name__) class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = ['pixel_values'] def __init__( self : Optional[Any] , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Union[int, float] = 1 / 255 , _UpperCamelCase : bool = True , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , **_UpperCamelCase : str , ) ->None: """simple docstring""" super().__init__(**_UpperCamelCase) _lowerCamelCase : Tuple = size if size is not None else {"""height""": 256, """width""": 256} _lowerCamelCase : Optional[Any] = get_size_dict(_UpperCamelCase) _lowerCamelCase : Any = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} _lowerCamelCase : Any = get_size_dict(_UpperCamelCase , param_name="""crop_size""") _lowerCamelCase : int = do_resize _lowerCamelCase : int = size _lowerCamelCase : Optional[int] = resample _lowerCamelCase : int = do_center_crop _lowerCamelCase : Optional[Any] = crop_size _lowerCamelCase : Union[str, Any] = do_rescale _lowerCamelCase : List[str] = rescale_factor _lowerCamelCase : List[Any] = do_normalize _lowerCamelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray: """simple docstring""" _lowerCamelCase : Dict = get_size_dict(_UpperCamelCase) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""") return resize( _UpperCamelCase , size=(size["""height"""], size["""width"""]) , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : List[str] , ) ->np.ndarray: """simple docstring""" _lowerCamelCase : Union[str, Any] = get_size_dict(_UpperCamelCase) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. 
Got {size.keys()}""") return center_crop(_UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[int, float] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->str: """simple docstring""" return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray: """simple docstring""" return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : ImageInput , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Tuple=None , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : bool = None , _UpperCamelCase : float = None , _UpperCamelCase : bool = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , _UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCamelCase : List[Any] , ) ->PIL.Image.Image: """simple docstring""" _lowerCamelCase : Any = do_resize if do_resize is not None else self.do_resize _lowerCamelCase : List[str] = resample if resample is not None else self.resample _lowerCamelCase : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop _lowerCamelCase : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale _lowerCamelCase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCamelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize _lowerCamelCase : int = image_mean if image_mean is not None else self.image_mean _lowerCamelCase : Dict = image_std if image_std is not None else self.image_std _lowerCamelCase : Optional[Any] = size if size is not None else self.size _lowerCamelCase : Optional[int] = get_size_dict(_UpperCamelCase) _lowerCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size _lowerCamelCase : Dict = get_size_dict(_UpperCamelCase , param_name="""crop_size""") _lowerCamelCase : int = make_list_of_images(_UpperCamelCase) if not valid_images(_UpperCamelCase): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""") if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""") if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""") if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""") if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""") # All transformations expect numpy arrays. 
_lowerCamelCase : Union[str, Any] = [to_numpy_array(_UpperCamelCase) for image in images] if do_resize: _lowerCamelCase : Any = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase) for image in images] if do_center_crop: _lowerCamelCase : str = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase) for image in images] if do_rescale: _lowerCamelCase : Optional[int] = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase) for image in images] if do_normalize: _lowerCamelCase : List[str] = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase) for image in images] _lowerCamelCase : List[str] = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase) for image in images] _lowerCamelCase : int = {"""pixel_values""": images} return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase)
15
0
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class __snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : int , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int]=7 , _UpperCamelCase : Optional[Any]=3 , _UpperCamelCase : Any=18 , _UpperCamelCase : int=30 , _UpperCamelCase : Optional[int]=400 , _UpperCamelCase : List[Any]=True , _UpperCamelCase : int=32 , _UpperCamelCase : Optional[int]=True , ) ->str: """simple docstring""" _lowerCamelCase : Optional[int] = parent _lowerCamelCase : List[str] = batch_size _lowerCamelCase : int = num_channels _lowerCamelCase : List[str] = image_size _lowerCamelCase : Any = min_resolution _lowerCamelCase : Optional[Any] = max_resolution _lowerCamelCase : Optional[int] = do_resize _lowerCamelCase : str = size_divisor _lowerCamelCase : Dict = do_rescale def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict: """simple docstring""" return { "do_resize": self.do_resize, "size_divisor": self.size_divisor, "do_rescale": self.do_rescale, } @require_torch @require_vision class __snake_case ( snake_case__ , unittest.TestCase ): '''simple docstring''' _snake_case = GLPNImageProcessor if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int: """simple docstring""" _lowerCamelCase : List[Any] = GLPNImageProcessingTester(self) @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[int]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]: """simple docstring""" _lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(_A , """do_resize""")) self.assertTrue(hasattr(_A , """size_divisor""")) self.assertTrue(hasattr(_A , """resample""")) self.assertTrue(hasattr(_A , """do_rescale""")) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: """simple docstring""" pass def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict: """simple docstring""" _lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict) # create random PIL images _lowerCamelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A) for image in image_inputs: self.assertIsInstance(_A , Image.Image) # Test not batched input (GLPNImageProcessor doesn't support batching) _lowerCamelCase : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0) def _SCREAMING_SNAKE_CASE ( self : Dict) ->List[str]: """simple docstring""" _lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors _lowerCamelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A) for image in image_inputs: self.assertIsInstance(_A , np.ndarray) # Test not batched input (GLPNImageProcessor doesn't support batching) _lowerCamelCase : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values 
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0) def _SCREAMING_SNAKE_CASE ( self : int) ->int: """simple docstring""" _lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors _lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor) # Test not batched input (GLPNImageProcessor doesn't support batching) _lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
710
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for the input sample x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phase = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phase, -2 * pi))
    plt.show()
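# Minimal usage sketch (editorial illustration): any object with a matching
# `process` method satisfies the FilterType protocol, so a pass-through filter
# is enough to exercise both plots. The 48 kHz samplerate is an arbitrary
# assumption.
class IdentityFilter:
    def process(self, sample: float) -> float:
        # y[n] = x[n]: flat 0 dB magnitude response and zero phase shift.
        return sample


if __name__ == "__main__":
    show_frequency_response(IdentityFilter(), 48000)
    show_phase_response(IdentityFilter(), 48000)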
15
0
import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging lowerCAmelCase : Tuple =logging.get_logger(__name__) lowerCAmelCase : Dict ="▁" lowerCAmelCase : List[str] ={ "vocab_file": "vocab.json", "spm_file": "sentencepiece.bpe.model", "tokenizer_config_file": "tokenizer_config.json", } lowerCAmelCase : Union[str, Any] ={ "vocab_file": { "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json", "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json", }, "spm_file": { "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model", "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model", }, "tokenizer_config_file": { "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json", "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json", }, } lowerCAmelCase : Union[str, Any] ={ "facebook/m2m100_418M": 1024, } # fmt: off lowerCAmelCase : int ={ "m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"], "wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"] } class __snake_case ( __lowerCAmelCase ): '''simple docstring''' _snake_case = VOCAB_FILES_NAMES _snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case = PRETRAINED_VOCAB_FILES_MAP _snake_case = ['input_ids', 'attention_mask'] _snake_case = [] _snake_case = [] def __init__( self : List[str] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Dict , _UpperCamelCase : str=None , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : Optional[int]="<s>" , _UpperCamelCase : List[Any]="</s>" , _UpperCamelCase : Tuple="</s>" , _UpperCamelCase : str="<pad>" , _UpperCamelCase : Optional[Any]="<unk>" , _UpperCamelCase : Optional[Any]="m2m100" , _UpperCamelCase : Any = None , _UpperCamelCase : Any=8 , **_UpperCamelCase : int , ) ->None: """simple docstring""" _lowerCamelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs _lowerCamelCase : Any = language_codes _lowerCamelCase : Optional[int] = FAIRSEQ_LANGUAGE_CODES[language_codes] _lowerCamelCase : int = {lang_code: F"""__{lang_code}__""" for lang_code in fairseq_language_code} _lowerCamelCase : str = kwargs.get("""additional_special_tokens""" , []) kwargs["additional_special_tokens"] += [ self.get_lang_token(__a) for lang_code in fairseq_language_code if self.get_lang_token(__a) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=__a , tgt_lang=__a , bos_token=__a , eos_token=__a , sep_token=__a , unk_token=__a , pad_token=__a , language_codes=__a , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__a , **__a , ) _lowerCamelCase : 
Optional[Any] = vocab_file _lowerCamelCase : List[str] = load_json(__a) _lowerCamelCase : Any = {v: k for k, v in self.encoder.items()} _lowerCamelCase : int = spm_file _lowerCamelCase : Any = load_spm(__a , self.sp_model_kwargs) _lowerCamelCase : Dict = len(self.encoder) _lowerCamelCase : str = { self.get_lang_token(__a): self.encoder_size + i for i, lang_code in enumerate(__a) } _lowerCamelCase : List[Any] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__a)} _lowerCamelCase : Union[str, Any] = {v: k for k, v in self.lang_token_to_id.items()} _lowerCamelCase : Union[str, Any] = src_lang if src_lang is not None else """en""" _lowerCamelCase : Optional[Any] = tgt_lang _lowerCamelCase : Tuple = self.get_lang_id(self._src_lang) self.set_src_lang_special_tokens(self._src_lang) _lowerCamelCase : Any = num_madeup_words @property def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int: """simple docstring""" return len(self.encoder) + len(self.lang_token_to_id) @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->str: """simple docstring""" return self._src_lang @src_lang.setter def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Tuple) ->None: """simple docstring""" _lowerCamelCase : List[str] = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Any) ->List[str]: """simple docstring""" return self.sp_model.encode(__a , out_type=__a) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : Union[str, Any]) ->Tuple: """simple docstring""" if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(__a , self.encoder[self.unk_token]) def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : List[str]) ->str: """simple docstring""" if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(__a , self.unk_token) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : int) ->Optional[int]: """simple docstring""" _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Tuple = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__a) + token _lowerCamelCase : Tuple = [] else: current_sub_tokens.append(__a) out_string += self.sp_model.decode(__a) return out_string.strip() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : str = None , _UpperCamelCase : Dict = False) ->List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a) _lowerCamelCase : Union[str, Any] = [1] * len(self.prefix_tokens) _lowerCamelCase : Dict = [1] * len(self.suffix_tokens) if token_ids_a is None: return prefix_ones + ([0] * len(__a)) + suffix_ones return prefix_ones + ([0] * len(__a)) + ([0] * len(__a)) + suffix_ones def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] = None) ->List[int]: """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _SCREAMING_SNAKE_CASE ( self : int) ->Dict: """simple docstring""" _lowerCamelCase : Union[str, Any] = {self.convert_ids_to_tokens(__a): i for i in 
range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self : str) ->Dict: """simple docstring""" _lowerCamelCase : Optional[int] = self.__dict__.copy() _lowerCamelCase : List[Any] = None return state def __setstate__( self : List[str] , _UpperCamelCase : Union[str, Any]) ->None: """simple docstring""" _lowerCamelCase : Tuple = d # for backward compatibility if not hasattr(self , """sp_model_kwargs"""): _lowerCamelCase : int = {} _lowerCamelCase : List[Any] = load_spm(self.spm_file , self.sp_model_kwargs) def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : str , _UpperCamelCase : Optional[Any] = None) ->Tuple[str]: """simple docstring""" _lowerCamelCase : Optional[int] = Path(__a) if not save_dir.is_dir(): raise OSError(F"""{save_directory} should be a directory""") _lowerCamelCase : Optional[Any] = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""] ) _lowerCamelCase : Dict = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""] ) save_json(self.encoder , __a) if os.path.abspath(self.spm_file) != os.path.abspath(__a) and os.path.isfile(self.spm_file): copyfile(self.spm_file , __a) elif not os.path.isfile(self.spm_file): with open(__a , """wb""") as fi: _lowerCamelCase : str = self.sp_model.serialized_model_proto() fi.write(__a) return (str(__a), str(__a)) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : Any = "en" , _UpperCamelCase : int = None , _UpperCamelCase : str = "ro" , **_UpperCamelCase : Dict , ) ->BatchEncoding: """simple docstring""" _lowerCamelCase : Union[str, Any] = src_lang _lowerCamelCase : List[str] = tgt_lang self.set_src_lang_special_tokens(self.src_lang) return super().prepare_seqaseq_batch(__a , __a , **__a) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Any , _UpperCamelCase : List[str] , _UpperCamelCase : str , **_UpperCamelCase : Union[str, Any]) ->int: """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""") _lowerCamelCase : Union[str, Any] = src_lang _lowerCamelCase : Tuple = self(__a , add_special_tokens=__a , **__a) _lowerCamelCase : str = self.get_lang_id(__a) _lowerCamelCase : Union[str, Any] = tgt_lang_id return inputs def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->int: """simple docstring""" self.set_src_lang_special_tokens(self.src_lang) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->str: """simple docstring""" self.set_tgt_lang_special_tokens(self.tgt_lang) def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : List[Any]) ->None: """simple docstring""" _lowerCamelCase : Any = self.get_lang_token(__a) _lowerCamelCase : List[Any] = self.lang_token_to_id[lang_token] _lowerCamelCase : Tuple = [self.cur_lang_id] _lowerCamelCase : Optional[Any] = [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Dict) ->None: """simple docstring""" _lowerCamelCase : Tuple = self.get_lang_token(__a) _lowerCamelCase : List[Any] = self.lang_token_to_id[lang_token] _lowerCamelCase : int = [self.cur_lang_id] _lowerCamelCase : Optional[int] = [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Union[str, Any]) ->str: """simple docstring""" return self.lang_code_to_token[lang] def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : Dict) ->int: """simple docstring""" 
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
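# Usage sketch via the published checkpoint (a hedged illustration; it relies
# only on the public M2M100Tokenizer API, not on the local class name above):
#
#     from transformers import M2M100Tokenizer
#
#     tok = M2M100Tokenizer.from_pretrained(
#         "facebook/m2m100_418M", src_lang="en", tgt_lang="ro"
#     )
#     batch = tok("Hello world!", return_tensors="pt")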
711
import argparse
from pathlib import Path

import torch
from packaging import version
from torch.onnx import export

from diffusers import AutoencoderKL


is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
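# Quick smoke test of the exported decoder with onnxruntime (an editorial
# sketch: it assumes the traced `return_dict` flag is folded into the graph,
# leaving "latent_sample" as the only runtime input, and that the VAE
# upsamples 8x so a 25x25 latent decodes to 200x200):
#
#     import numpy as np
#     import onnxruntime as ort
#
#     sess = ort.InferenceSession("sd_onnx/vae_decoder/model.onnx")
#     latent = np.random.randn(1, 4, 25, 25).astype(np.float32)
#     (sample,) = sess.run(["sample"], {"latent_sample": latent})
#     print(sample.shape)  # expected (1, 3, 200, 200)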
15
0
import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) lowerCAmelCase : Any =logging.getLogger(__name__) lowerCAmelCase : int ="Hello world! cécé herlolip" lowerCAmelCase : Optional[int] =namedtuple( "BertAbsConfig", [ "temp_dir", "large", "use_bert_emb", "finetune_bert", "encoder", "share_emb", "max_pos", "enc_layers", "enc_hidden_size", "enc_heads", "enc_ff_size", "enc_dropout", "dec_layers", "dec_hidden_size", "dec_heads", "dec_ff_size", "dec_dropout", ], ) def A__ ( __A , __A ): '''simple docstring''' _lowerCamelCase : Tuple = BertAbsConfig( temp_dir=""".""" , finetune_bert=lowerCamelCase_ , large=lowerCamelCase_ , share_emb=lowerCamelCase_ , use_bert_emb=lowerCamelCase_ , encoder="""bert""" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2_048 , dec_dropout=0.2 , ) _lowerCamelCase : int = torch.load(lowerCamelCase_ , lambda __A , __A : storage ) _lowerCamelCase : List[str] = AbsSummarizer(lowerCamelCase_ , torch.device("""cpu""" ) , lowerCamelCase_ ) original.eval() _lowerCamelCase : Optional[int] = BertAbsSummarizer(lowerCamelCase_ , torch.device("""cpu""" ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info("""convert the model""" ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info("""Make sure that the models' outputs are identical""" ) _lowerCamelCase : int = BertTokenizer.from_pretrained("""bert-base-uncased""" ) # prepare the model inputs _lowerCamelCase : Optional[Any] = tokenizer.encode("""This is sample éàalj'-.""" ) encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(lowerCamelCase_ )) ) _lowerCamelCase : List[str] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 ) _lowerCamelCase : Optional[int] = tokenizer.encode("""This is sample 3 éàalj'-.""" ) decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(lowerCamelCase_ )) ) _lowerCamelCase : Optional[Any] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass _lowerCamelCase : Optional[int] = encoder_input_ids _lowerCamelCase : Optional[Any] = decoder_input_ids _lowerCamelCase : List[str] = None _lowerCamelCase : Tuple = None _lowerCamelCase : int = None _lowerCamelCase : List[Any] = None _lowerCamelCase : Optional[int] = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. 
# We make sure that the outputs of the full stack are identical _lowerCamelCase : str = original(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0] _lowerCamelCase : Optional[Any] = original.generator(lowerCamelCase_ ) _lowerCamelCase : List[Any] = new_model( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0] _lowerCamelCase : str = new_model.generator(lowerCamelCase_ ) _lowerCamelCase : int = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print("""Maximum absolute difference beween weights: {:.2f}""".format(lowerCamelCase_ ) ) _lowerCamelCase : Optional[int] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print("""Maximum absolute difference beween weights: {:.2f}""".format(lowerCamelCase_ ) ) _lowerCamelCase : Any = torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) if are_identical: logging.info("""all weights are equal up to 1e-3""" ) else: raise ValueError("""the weights are different. The new model is likely different from the original one.""" ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info("""saving the model's state dictionary""" ) torch.save( new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" ) if __name__ == "__main__": lowerCAmelCase : Optional[int] =argparse.ArgumentParser() parser.add_argument( "--bertabs_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model.", ) lowerCAmelCase : Dict =parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
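# Illustrative invocation (an editorial sketch; the script filename is assumed
# and both paths are placeholders):
#
#     python convert_bertabs_original_pytorch_checkpoint.py \
#         --bertabs_checkpoint_path ./bertabs_cnndm_final.pt \
#         --pytorch_dump_folder_path ./bertabs-converted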
712
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """Calculate the built-in voltage of a pn junction from its doping concentrations."""
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError("Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError("Acceptor concentration should be greater than intrinsic concentration")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
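# Worked example (illustrative silicon numbers at T = 300 K): with
# N_D = N_A = 1e17 cm^-3 and n_i = 1.5e10 cm^-3, kT/q is ~0.0259 V and
# ln(N_D * N_A / n_i**2) is ~31.4, so:
#
#     >>> round(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10), 3)
#     0.812
#
# The concentration unit cancels inside the logarithm's ratio, so any
# consistent unit works.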
15
0
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process lowerCAmelCase : Tuple =logging.getLogger(__name__) def A__ ( __A , __A ): '''simple docstring''' return (preds == labels).mean() @dataclass class __snake_case : '''simple docstring''' _snake_case = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _snake_case = field( default=__lowerCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _snake_case = field( default=__lowerCamelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _snake_case = field( default=__lowerCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) @dataclass class __snake_case : '''simple docstring''' _snake_case = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} ) _snake_case = field(metadata={'help': 'Should contain the data files for the task.'} ) _snake_case = field( default=128 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) _snake_case = field( default=__lowerCamelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def A__ ( ): '''simple docstring''' _lowerCamelCase : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. 
Use""" """ --overwrite_output_dir to overcome.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" , A__ ) # Set seed set_seed(training_args.seed ) try: _lowerCamelCase : Union[str, Any] = processors[data_args.task_name]() _lowerCamelCase : Optional[Any] = processor.get_labels() _lowerCamelCase : str = len(A__ ) except KeyError: raise ValueError("""Task not found: %s""" % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _lowerCamelCase : int = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A__ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) _lowerCamelCase : Any = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _lowerCamelCase : Union[str, Any] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=A__ , cache_dir=model_args.cache_dir , ) # Get datasets _lowerCamelCase : Union[str, Any] = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=A__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) _lowerCamelCase : Union[str, Any] = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=A__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(__A ) -> Dict: _lowerCamelCase : Union[str, Any] = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(A__ , p.label_ids )} # Data collator _lowerCamelCase : str = DataCollatorWithPadding(A__ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer _lowerCamelCase : List[Any] = Trainer( model=A__ , args=A__ , train_dataset=A__ , eval_dataset=A__ , compute_metrics=A__ , data_collator=A__ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _lowerCamelCase : Dict = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) _lowerCamelCase : Any = trainer.evaluate() _lowerCamelCase : str = 
os.path.join(training_args.output_dir , """eval_results.txt""" ) if trainer.is_world_master(): with open(A__ , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key, value in result.items(): logger.info(""" %s = %s""" , A__ , A__ ) writer.write("""%s = %s\n""" % (key, value) ) results.update(A__ ) return results def A__ ( __A ): '''simple docstring''' main() if __name__ == "__main__": main()
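# Illustrative launch (an editorial sketch; valid task names come from the
# `processors` dict in utils_multiple_choice, "swag" is assumed here and all
# paths are placeholders):
#
#     python run_multiple_choice.py \
#         --model_name_or_path bert-base-uncased \
#         --task_name swag \
#         --data_dir ./swag \
#         --max_seq_length 128 \
#         --output_dir ./swag_output \
#         --do_train --do_eval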
713
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    """Tokenize one example and record its character-to-token ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
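# Illustrative invocation (an editorial sketch; the flag names come from
# PretokenizationArguments as used above, the repo ids are placeholders):
#
#     python pretokenizing.py \
#         --tokenizer_dir codeparrot/codeparrot \
#         --dataset_name codeparrot/codeparrot-clean-train \
#         --tokenized_data_repo my-user/tokenized-train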
15
0
from random import shuffle import tensorflow as tf from numpy import array def A__ ( __A , __A ): '''simple docstring''' _lowerCamelCase : Any = int(lowerCamelCase__ ) assert noofclusters < len(lowerCamelCase__ ) # Find out the dimensionality _lowerCamelCase : Any = len(vectors[0] ) # Will help select random centroids from among the available vectors _lowerCamelCase : List[str] = list(range(len(lowerCamelCase__ ) ) ) shuffle(lowerCamelCase__ ) # GRAPH OF COMPUTATION # We initialize a new graph and set it as the default during each run # of this algorithm. This ensures that as this function is called # multiple times, the default graph doesn't keep getting crowded with # unused ops and Variables from previous function calls. _lowerCamelCase : List[str] = tf.Graph() with graph.as_default(): # SESSION OF COMPUTATION _lowerCamelCase : List[Any] = tf.Session() ##CONSTRUCTING THE ELEMENTS OF COMPUTATION ##First lets ensure we have a Variable vector for each centroid, ##initialized to one of the vectors from the available data points _lowerCamelCase : Tuple = [ tf.Variable(vectors[vector_indices[i]] ) for i in range(lowerCamelCase__ ) ] ##These nodes will assign the centroid Variables the appropriate ##values _lowerCamelCase : Tuple = tf.placeholder("""float64""" , [dim] ) _lowerCamelCase : List[str] = [] for centroid in centroids: cent_assigns.append(tf.assign(lowerCamelCase__ , lowerCamelCase__ ) ) ##Variables for cluster assignments of individual vectors(initialized ##to 0 at first) _lowerCamelCase : str = [tf.Variable(0 ) for i in range(len(lowerCamelCase__ ) )] ##These nodes will assign an assignment Variable the appropriate ##value _lowerCamelCase : Dict = tf.placeholder("""int32""" ) _lowerCamelCase : List[Any] = [] for assignment in assignments: cluster_assigns.append(tf.assign(lowerCamelCase__ , lowerCamelCase__ ) ) ##Now lets construct the node that will compute the mean # The placeholder for the input _lowerCamelCase : Tuple = tf.placeholder("""float""" , [None, dim] ) # The Node/op takes the input and computes a mean along the 0th # dimension, i.e. the list of input vectors _lowerCamelCase : Optional[Any] = tf.reduce_mean(lowerCamelCase__ , 0 ) ##Node for computing Euclidean distances # Placeholders for input _lowerCamelCase : Optional[int] = tf.placeholder("""float""" , [dim] ) _lowerCamelCase : int = tf.placeholder("""float""" , [dim] ) _lowerCamelCase : Any = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowerCamelCase__ , lowerCamelCase__ ) , 2 ) ) ) ##This node will figure out which cluster to assign a vector to, ##based on Euclidean distances of the vector from the centroids. # Placeholder for input _lowerCamelCase : Optional[int] = tf.placeholder("""float""" , [noofclusters] ) _lowerCamelCase : List[Any] = tf.argmin(lowerCamelCase__ , 0 ) ##INITIALIZING STATE VARIABLES ##This will help initialization of all Variables defined with respect ##to the graph. The Variable-initializer should be defined after ##all the Variables have been constructed, so that each of them ##will be included in the initialization. _lowerCamelCase : List[Any] = tf.initialize_all_variables() # Initialize all variables sess.run(lowerCamelCase__ ) ##CLUSTERING ITERATIONS # Now perform the Expectation-Maximization steps of K-Means clustering # iterations. To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. 
_lowerCamelCase : Optional[int] = 100 for _ in range(lowerCamelCase__ ): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. # Iterate over each vector for vector_n in range(len(lowerCamelCase__ ) ): _lowerCamelCase : Union[str, Any] = vectors[vector_n] # Compute Euclidean distance between this vector and each # centroid. Remember that this list cannot be named #'centroid_distances', since that is the input to the # cluster assignment node. _lowerCamelCase : int = [ sess.run(lowerCamelCase__ , feed_dict={va: vect, va: sess.run(lowerCamelCase__ )} ) for centroid in centroids ] # Now use the cluster assignment node, with the distances # as the input _lowerCamelCase : Union[str, Any] = sess.run( lowerCamelCase__ , feed_dict={centroid_distances: distances} ) # Now assign the value to the appropriate state variable sess.run( cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} ) ##MAXIMIZATION STEP # Based on the expected state computed from the Expectation Step, # compute the locations of the centroids so as to maximize the # overall objective of minimizing within-cluster Sum-of-Squares for cluster_n in range(lowerCamelCase__ ): # Collect all the vectors assigned to this cluster _lowerCamelCase : Optional[int] = [ vectors[i] for i in range(len(lowerCamelCase__ ) ) if sess.run(assignments[i] ) == cluster_n ] # Compute new centroid location _lowerCamelCase : Optional[int] = sess.run( lowerCamelCase__ , feed_dict={mean_input: array(lowerCamelCase__ )} ) # Assign value to appropriate variable sess.run( cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} ) # Return centroids and assignments _lowerCamelCase : str = sess.run(lowerCamelCase__ ) _lowerCamelCase : str = sess.run(lowerCamelCase__ ) return centroids, assignments
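# Illustrative driver (an editorial sketch: the entry point's name and
# signature are assumed to be TFKMeansCluster(vectors, noofclusters), which is
# what the body implies; the TF1-style ops above additionally require
# TensorFlow 1.x, or tf.compat.v1 with v2 behavior disabled):
#
#     import numpy as np
#
#     points = np.random.rand(100, 2).astype("float64")
#     centroids, assignments = TFKMeansCluster(points, 3)
#     print(centroids, assignments)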
714
import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _snake_case = IFPipeline _snake_case = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'} _snake_case = TEXT_TO_IMAGE_BATCH_PARAMS _snake_case = PipelineTesterMixin.required_optional_params - {'latents'} def _SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]: """simple docstring""" return self._get_dummy_components() def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=0) ->Optional[Any]: """simple docstring""" if str(_UpperCamelCase).startswith("""mps"""): _lowerCamelCase : int = torch.manual_seed(_UpperCamelCase) else: _lowerCamelCase : List[Any] = torch.Generator(device=_UpperCamelCase).manual_seed(_UpperCamelCase) _lowerCamelCase : Dict = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""") def _SCREAMING_SNAKE_CASE ( self : Any) ->str: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1) def _SCREAMING_SNAKE_CASE ( self : int) ->Any: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2) def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]: """simple docstring""" self._test_save_load_local() def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3) @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]: """simple docstring""" _lowerCamelCase : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa) _lowerCamelCase : Tuple = IFSuperResolutionPipeline.from_pretrained( """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("""cuda""") _lowerCamelCase , _lowerCamelCase : str = 
pipe_a.encode_prompt("""anime turtle""" , device="""cuda""") del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() _lowerCamelCase : str = None _lowerCamelCase : str = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img _lowerCamelCase : Optional[Any] = IFImgaImgPipeline(**pipe_a.components) _lowerCamelCase : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if_imgaimg(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting _lowerCamelCase : Any = IFInpaintingPipeline(**pipe_a.components) _lowerCamelCase : Dict = IFInpaintingSuperResolutionPipeline(**pipe_a.components) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if_inpainting(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str) ->Tuple: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : Optional[int] = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Optional[Any] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Optional[int] = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 _lowerCamelCase : Dict = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : str = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : Any = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : str , _UpperCamelCase : List[Any]) ->Any: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Union[str, Any] = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Dict 
= pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Union[str, Any] = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _lowerCamelCase : List[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : List[str] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Optional[Any] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : List[Any] = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : str = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple) ->Optional[int]: """simple docstring""" _start_torch_memory_measurement() _lowerCamelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(1)).to(_UpperCamelCase) _lowerCamelCase : int = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Any = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , ) _lowerCamelCase : Any = output.images[0] assert image.shape == (64, 64, 3) _lowerCamelCase : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 _lowerCamelCase : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) # pipeline 2 _start_torch_memory_measurement() _lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0) _lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase) _lowerCamelCase : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1)).to(_UpperCamelCase) _lowerCamelCase : List[str] = pipe_a( prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , ) _lowerCamelCase : Optional[Any] = output.images[0] assert image.shape == (256, 256, 3) _lowerCamelCase : Optional[Any] = 
torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 _lowerCamelCase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""") assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase) def A__ ( ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
15
0